Rearranging classes under the PG namespace to be a better Ruby citizen

--HG--
rename : .irbrc => .pryrc
rename : spec/pgconn_spec.rb => spec/pg/connection_spec.rb
rename : spec/pgresult_spec.rb => spec/pg/result_spec.rb
This commit is contained in:
Michael Granger 2012-01-24 17:21:30 -08:00
parent 4ddd62abe6
commit ec29fc0c03
35 changed files with 5002 additions and 5440 deletions
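For readers skimming the diff, here is a minimal before/after sketch of the renaming. It relies only on the backward-compatible aliases added below (PGconn = PG::Connection, PGresult = PG::Result, PGError = PG::Error) and assumes a reachable 'test' database:

# Pre-0.13 style -- still works through the aliases
conn = PGconn.connect( :dbname => 'test' )
res  = conn.exec( 'SELECT 1' )
res.is_a?( PGresult )       # => true

# Namespaced style introduced by this commit
conn = PG::Connection.new( :dbname => 'test' )
res  = conn.exec( 'SELECT 1' )
res.is_a?( PG::Result )     # => true (PGresult and PG::Result are the same class)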

23
.pryrc Normal file

@ -0,0 +1,23 @@
#!/usr/bin/ruby -*- ruby -*-
BEGIN {
require 'pathname'
require 'rbconfig'
basedir = Pathname.new( __FILE__ ).dirname.expand_path
libdir = basedir + "lib"
puts ">>> Adding #{libdir} to load path..."
$LOAD_PATH.unshift( libdir.to_s )
}
# Try to require the 'pg' library
begin
$stderr.puts "Loading pg..."
require 'pg'
rescue => e
$stderr.puts "Ack! pg library failed to load: #{e.message}\n\t" +
e.backtrace.join( "\n\t" )
end

View File

@ -3,11 +3,10 @@ projectDirectory = "$CWD"
windowTitle = "${CWD/^.*\///} «$TM_DISPLAYNAME»"
excludeInFileChooser = "{$exclude,.hg}"
[ source.ruby ]
TM_MAKE = 'rake'
TM_MAKE_FILE = '${projectDirectory}/Rakefile'
[ source ]
softTabs = false
tabSize = 4
[ "**/*_spec.rb" ]
fileType = 'text.ruby.rspec'

View File

@ -17,10 +17,16 @@ ext/compat.h
ext/extconf.rb
ext/pg.c
ext/pg.h
ext/pg_connection.c
ext/pg_result.c
ext/vc/pg.sln
ext/vc/pg_18/pg.vcproj
ext/vc/pg_19/pg_19.vcproj
lib/pg.rb
lib/pg/connection.rb
lib/pg/constants.rb
lib/pg/exceptions.rb
lib/pg/result.rb
misc/openssl-pg-segfault.rb
sample/async_api.rb
sample/async_copyto.rb
@ -39,6 +45,6 @@ sample/test_binary_values.rb
spec/data/expected_trace.out
spec/data/random_binary_data
spec/lib/helpers.rb
spec/m17n_spec.rb
spec/pgconn_spec.rb
spec/pgresult_spec.rb
spec/pg/connection_spec.rb
spec/pg/result_spec.rb
spec/pg_spec.rb

View File

@ -6,18 +6,14 @@
Pg is the Ruby interface to the {PostgreSQL RDBMS}[http://www.postgresql.org/].
It works with PostgreSQL 8.2 and later.
This will be the last minor version to support 8.2 -- 0.13 will support 8.3
and later, following the
{PostgreSQL Release Support Policy}[http://bit.ly/6AfPhm].
It works with {PostgreSQL 8.3 and later}[http://bit.ly/6AfPhm].
== Requirements
* Ruby 1.8.7-p249 or later.
* PostgreSQL 8.2.x or later installed.
* PostgreSQL 8.3.x or later installed.
It may work with earlier versions as well, but those are not regularly tested.
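A minimal usage sketch under the new namespace (this assumes a local 'test' database; result values come back as Strings):

require 'pg'

conn = PG.connect( :dbname => 'test' )
res  = conn.exec( 'SELECT 1 AS a, 2 AS b' )
res[0]['a']    # => "1"
res[0]['b']    # => "2"
conn.finish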

View File

@ -1,541 +0,0 @@
/************************************************
compat.c -
Author: matz
created at: Tue May 13 20:07:35 JST 1997
Author: ematsu
modified at: Wed Jan 20 16:41:51 1999
$Author$
$Date$
************************************************/
#include <ctype.h>
#include "compat.h"
#ifdef PG_BEFORE_080300
int
PQconnectionNeedsPassword(PGconn *conn)
{
rb_raise(rb_eStandardError,
"PQconnectionNeedsPassword not supported by this client version.");
}
int
PQconnectionUsedPassword(PGconn *conn)
{
rb_raise(rb_eStandardError,
"PQconnectionUsedPassword not supported by this client version.");
}
int
lo_truncate(PGconn *conn, int fd, size_t len)
{
rb_raise(rb_eStandardError, "lo_truncate not supported by this client version.");
}
#endif /* PG_BEFORE_080300 */
#ifdef PG_BEFORE_080200
int
PQisthreadsafe()
{
return Qfalse;
}
int
PQnparams(const PGresult *res)
{
rb_raise(rb_eStandardError, "PQnparams not supported by this client version.");
}
Oid
PQparamtype(const PGresult *res, int param_number)
{
rb_raise(rb_eStandardError, "PQparamtype not supported by this client version.");
}
PGresult *
PQdescribePrepared(PGconn *conn, const char *stmtName)
{
rb_raise(rb_eStandardError, "PQdescribePrepared not supported by this client version.");
}
PGresult *
PQdescribePortal(PGconn *conn, const char *portalName)
{
rb_raise(rb_eStandardError, "PQdescribePortal not supported by this client version.");
}
int
PQsendDescribePrepared(PGconn *conn, const char *stmtName)
{
rb_raise(rb_eStandardError, "PQsendDescribePrepared not supported by this client version.");
}
int
PQsendDescribePortal(PGconn *conn, const char *portalName)
{
rb_raise(rb_eStandardError, "PQsendDescribePortal not supported by this client version.");
}
char *
PQencryptPassword(const char *passwd, const char *user)
{
rb_raise(rb_eStandardError, "PQencryptPassword not supported by this client version.");
}
#endif /* PG_BEFORE_080200 */
#ifdef PG_BEFORE_080100
Oid
lo_create(PGconn *conn, Oid lobjId)
{
rb_raise(rb_eStandardError, "lo_create not supported by this client version.");
}
#endif /* PG_BEFORE_080100 */
#ifdef PG_BEFORE_080000
PGresult *
PQprepare(PGconn *conn, const char *stmtName, const char *query,
int nParams, const Oid *paramTypes)
{
rb_raise(rb_eStandardError, "PQprepare not supported by this client version.");
}
int
PQsendPrepare(PGconn *conn, const char *stmtName, const char *query,
int nParams, const Oid *paramTypes)
{
rb_raise(rb_eStandardError, "PQsendPrepare not supported by this client version.");
}
int
PQserverVersion(const PGconn* conn)
{
rb_raise(rb_eStandardError, "PQserverVersion not supported by this client version.");
}
#endif /* PG_BEFORE_080000 */
#ifdef PG_BEFORE_070400
PGresult *
PQexecParams(PGconn *conn, const char *command, int nParams,
const Oid *paramTypes, const char * const * paramValues, const int *paramLengths,
const int *paramFormats, int resultFormat)
{
rb_raise(rb_eStandardError, "PQexecParams not supported by this client version.");
}
PGTransactionStatusType
PQtransactionStatus(const PGconn *conn)
{
rb_raise(rb_eStandardError, "PQtransactionStatus not supported by this client version.");
}
char *
PQparameterStatus(const PGconn *conn, const char *paramName)
{
rb_raise(rb_eStandardError, "PQparameterStatus not supported by this client version.");
}
int
PQprotocolVersion(const PGconn *conn)
{
rb_raise(rb_eStandardError, "PQprotocolVersion not supported by this client version.");
}
PGresult
*PQexecPrepared(PGconn *conn, const char *stmtName, int nParams,
const char * const *ParamValues, const int *paramLengths, const int *paramFormats,
int resultFormat)
{
rb_raise(rb_eStandardError, "PQexecPrepared not supported by this client version.");
}
int
PQsendQueryParams(PGconn *conn, const char *command, int nParams,
const Oid *paramTypes, const char * const * paramValues, const int *paramLengths,
const int *paramFormats, int resultFormat)
{
rb_raise(rb_eStandardError, "PQsendQueryParams not supported by this client version.");
}
int
PQsendQueryPrepared(PGconn *conn, const char *stmtName, int nParams,
const char * const *ParamValues, const int *paramLengths, const int *paramFormats,
int resultFormat)
{
rb_raise(rb_eStandardError, "PQsendQueryPrepared not supported by this client version.");
}
int
PQputCopyData(PGconn *conn, const char *buffer, int nbytes)
{
rb_raise(rb_eStandardError, "PQputCopyData not supported by this client version.");
}
int
PQputCopyEnd(PGconn *conn, const char *errormsg)
{
rb_raise(rb_eStandardError, "PQputCopyEnd not supported by this client version.");
}
int
PQgetCopyData(PGconn *conn, char **buffer, int async)
{
rb_raise(rb_eStandardError, "PQgetCopyData not supported by this client version.");
}
PGVerbosity
PQsetErrorVerbosity(PGconn *conn, PGVerbosity verbosity)
{
rb_raise(rb_eStandardError, "PQsetErrorVerbosity not supported by this client version.");
}
Oid
PQftable(const PGresult *res, int column_number)
{
rb_raise(rb_eStandardError, "PQftable not supported by this client version.");
}
int
PQftablecol(const PGresult *res, int column_number)
{
rb_raise(rb_eStandardError, "PQftablecol not supported by this client version.");
}
int
PQfformat(const PGresult *res, int column_number)
{
rb_raise(rb_eStandardError, "PQfformat not supported by this client version.");
}
PQnoticeReceiver
PQsetNoticeReceiver(PGconn *conn, PQnoticeReceiver proc, void *arg)
{
rb_raise(rb_eStandardError, "PQsetNoticeReceiver not supported by this client version.");
}
char *
PQresultErrorField(const PGresult *res, int fieldcode)
{
rb_raise(rb_eStandardError, "PQresultErrorField not supported by this client version.");
}
#endif /* PG_BEFORE_070400 */
#ifdef PG_BEFORE_070300
size_t
PQescapeStringConn(PGconn *conn, char *to, const char *from,
size_t length, int *error)
{
return PQescapeString(to,from,length);
}
unsigned char *
PQescapeByteaConn(PGconn *conn, const unsigned char *from,
size_t from_length, size_t *to_length)
{
return PQescapeBytea(from, from_length, to_length);
}
#endif /* PG_BEFORE_070300 */
/**************************************************************************
IF ANY CODE IS COPIED FROM POSTGRESQL, PLACE IT AFTER THIS COMMENT.
***************************************************************************
Portions of code after this point were copied from the PostgreSQL source
distribution, available at http://www.postgresql.org
***************************************************************************
Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
Portions Copyright (c) 1994, The Regents of the University of California
Permission to use, copy, modify, and distribute this software and its
documentation for any purpose, without fee, and without a written agreement
is hereby granted, provided that the above copyright notice and this
paragraph and the following two paragraphs appear in all copies.
IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR
DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING
LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS
DOCUMENTATION, EVEN IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATIONS TO
PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
**************************************************************************/
#ifndef HAVE_PQSETCLIENTENCODING
int
PQsetClientEncoding(PGconn *conn, const char *encoding)
{
char qbuf[128];
static const char query[] = "set client_encoding to '%s'";
PGresult *res;
int status;
if (!conn || PQstatus(conn) != CONNECTION_OK)
return -1;
if (!encoding)
return -1;
/* check query buffer overflow */
if (sizeof(qbuf) < (sizeof(query) + strlen(encoding)))
return -1;
/* ok, now send a query */
sprintf(qbuf, query, encoding);
res = PQexec(conn, qbuf);
if (res == NULL)
return -1;
if (PQresultStatus(res) != PGRES_COMMAND_OK)
status = -1;
else
{
/*
* In protocol 2 we have to assume the setting will stick, and adjust
* our state immediately. In protocol 3 and up we can rely on the
* backend to report the parameter value, and we'll change state at
* that time.
*/
if (PQprotocolVersion(conn) < 3)
pqSaveParameterStatus(conn, "client_encoding", encoding);
status = 0; /* everything is ok */
}
PQclear(res);
return status;
}
#endif /* HAVE_PQSETCLIENTENCODING */
#ifndef HAVE_PQESCAPESTRING
/*
* Escaping arbitrary strings to get valid SQL literal strings.
*
* Replaces "\\" with "\\\\" and "'" with "''".
*
* length is the length of the source string. (Note: if a terminating NUL
* is encountered sooner, PQescapeString stops short of "length"; the behavior
* is thus rather like strncpy.)
*
* For safety the buffer at "to" must be at least 2*length + 1 bytes long.
* A terminating NUL character is added to the output string, whether the
* input is NUL-terminated or not.
*
* Returns the actual length of the output (not counting the terminating NUL).
*/
size_t
PQescapeString(char *to, const char *from, size_t length)
{
const char *source = from;
char *target = to;
size_t remaining = length;
while (remaining > 0 && *source != '\0')
{
switch (*source)
{
case '\\':
*target++ = '\\';
*target++ = '\\';
break;
case '\'':
*target++ = '\'';
*target++ = '\'';
break;
default:
*target++ = *source;
break;
}
source++;
remaining--;
}
/* Write the terminating NUL character. */
*target = '\0';
return target - to;
}
/*
* PQescapeBytea - converts from binary string to the
* minimal encoding necessary to include the string in an SQL
* INSERT statement with a bytea type column as the target.
*
* The following transformations are applied
* '\0' == ASCII 0 == \\000
* '\'' == ASCII 39 == \'
* '\\' == ASCII 92 == \\\\
* anything < 0x20, or > 0x7e ---> \\ooo
* (where ooo is an octal expression)
*/
unsigned char *
PQescapeBytea(const unsigned char *bintext, size_t binlen, size_t *bytealen)
{
const unsigned char *vp;
unsigned char *rp;
unsigned char *result;
size_t i;
size_t len;
/*
* empty string has 1 char ('\0')
*/
len = 1;
vp = bintext;
for (i = binlen; i > 0; i--, vp++)
{
if (*vp < 0x20 || *vp > 0x7e)
len += 5; /* '5' is for '\\ooo' */
else if (*vp == '\'')
len += 2;
else if (*vp == '\\')
len += 4;
else
len++;
}
rp = result = (unsigned char *) malloc(len);
if (rp == NULL)
return NULL;
vp = bintext;
*bytealen = len;
for (i = binlen; i > 0; i--, vp++)
{
if (*vp < 0x20 || *vp > 0x7e)
{
(void) sprintf(rp, "\\\\%03o", *vp);
rp += 5;
}
else if (*vp == '\'')
{
rp[0] = '\\';
rp[1] = '\'';
rp += 2;
}
else if (*vp == '\\')
{
rp[0] = '\\';
rp[1] = '\\';
rp[2] = '\\';
rp[3] = '\\';
rp += 4;
}
else
*rp++ = *vp;
}
*rp = '\0';
return result;
}
#define ISFIRSTOCTDIGIT(CH) ((CH) >= '0' && (CH) <= '3')
#define ISOCTDIGIT(CH) ((CH) >= '0' && (CH) <= '7')
#define OCTVAL(CH) ((CH) - '0')
/*
* PQunescapeBytea - converts the null terminated string representation
* of a bytea, strtext, into binary, filling a buffer. It returns a
* pointer to the buffer (or NULL on error), and the size of the
* buffer in retbuflen. The pointer may subsequently be used as an
* argument to the function free(3). It is the reverse of PQescapeBytea.
*
* The following transformations are made:
* \\ == ASCII 92 == \
* \ooo == a byte whose value = ooo (ooo is an octal number)
* \x == x (x is any character not matched by the above transformations)
*/
unsigned char *
PQunescapeBytea(const unsigned char *strtext, size_t *retbuflen)
{
size_t strtextlen,
buflen;
unsigned char *buffer,
*tmpbuf;
size_t i,
j;
if (strtext == NULL)
return NULL;
strtextlen = strlen(strtext);
/*
* Length of input is max length of output, but add one to avoid
* unportable malloc(0) if input is zero-length.
*/
buffer = (unsigned char *) malloc(strtextlen + 1);
if (buffer == NULL)
return NULL;
for (i = j = 0; i < strtextlen;)
{
switch (strtext[i])
{
case '\\':
i++;
if (strtext[i] == '\\')
buffer[j++] = strtext[i++];
else
{
if ((ISFIRSTOCTDIGIT(strtext[i])) &&
(ISOCTDIGIT(strtext[i + 1])) &&
(ISOCTDIGIT(strtext[i + 2])))
{
int byte;
byte = OCTVAL(strtext[i++]);
byte = (byte << 3) + OCTVAL(strtext[i++]);
byte = (byte << 3) + OCTVAL(strtext[i++]);
buffer[j++] = byte;
}
}
/*
* Note: if we see '\' followed by something that isn't a
* recognized escape sequence, we loop around having done
* nothing except advance i. Therefore the something will
* be emitted as ordinary data on the next cycle. Corner
* case: '\' at end of string will just be discarded.
*/
break;
default:
buffer[j++] = strtext[i++];
break;
}
}
buflen = j; /* buflen is the length of the dequoted
* data */
/* Shrink the buffer to be no larger than necessary */
/* +1 avoids unportable behavior when buflen==0 */
tmpbuf = realloc(buffer, buflen + 1);
/* It would only be a very brain-dead realloc that could fail, but... */
if (!tmpbuf)
{
free(buffer);
return NULL;
}
*retbuflen = buflen;
return tmpbuf;
}
#endif

View File

@ -1,184 +0,0 @@
#ifndef __compat_h
#define __compat_h
#include <stdlib.h>
#ifdef RUBY_EXTCONF_H
#include RUBY_EXTCONF_H
#endif
#include "libpq-fe.h"
#include "libpq/libpq-fs.h" /* large-object interface */
#include "ruby.h"
/* pg_config.h does not exist in older versions of
* PostgreSQL, so I can't effectively use PG_VERSION_NUM
* Instead, I create some #defines to help organization.
*/
#ifndef HAVE_PQCONNECTIONUSEDPASSWORD
#define PG_BEFORE_080300
#endif
#ifndef HAVE_PQISTHREADSAFE
#define PG_BEFORE_080200
#endif
#ifndef HAVE_LO_CREATE
#define PG_BEFORE_080100
#endif
#ifndef HAVE_PQPREPARE
#define PG_BEFORE_080000
#endif
#ifndef HAVE_PQEXECPARAMS
#define PG_BEFORE_070400
#endif
#ifndef HAVE_PQESCAPESTRINGCONN
#define PG_BEFORE_070300
#error PostgreSQL client version too old, requires 7.3 or later.
#endif
/* This is necessary because NAMEDATALEN is defined in
* pg_config_manual.h in 8.3, and that include file doesn't
* exist before 7.4
*/
#ifndef PG_BEFORE_070400
#include "pg_config_manual.h"
#endif
#ifndef PG_DIAG_INTERNAL_POSITION
#define PG_DIAG_INTERNAL_POSITION 'p'
#endif /* PG_DIAG_INTERNAL_POSITION */
#ifndef PG_DIAG_INTERNAL_QUERY
#define PG_DIAG_INTERNAL_QUERY 'q'
#endif /* PG_DIAG_INTERNAL_QUERY */
#ifdef PG_BEFORE_080300
#ifndef HAVE_PG_ENCODING_TO_CHAR
#define pg_encoding_to_char(x) "SQL_ASCII"
#else
/* Some versions of PostgreSQL prior to 8.3 define pg_encoding_to_char
* but do not declare it in a header file, so this declaration will
* eliminate an unnecessary warning
*/
extern char* pg_encoding_to_char(int);
#endif /* HAVE_PG_ENCODING_TO_CHAR */
int PQconnectionNeedsPassword(PGconn *conn);
int PQconnectionUsedPassword(PGconn *conn);
int lo_truncate(PGconn *conn, int fd, size_t len);
#endif /* PG_BEFORE_080300 */
#ifdef PG_BEFORE_080200
int PQisthreadsafe(void);
int PQnparams(const PGresult *res);
Oid PQparamtype(const PGresult *res, int param_number);
PGresult * PQdescribePrepared(PGconn *conn, const char *stmtName);
PGresult * PQdescribePortal(PGconn *conn, const char *portalName);
int PQsendDescribePrepared(PGconn *conn, const char *stmtName);
int PQsendDescribePortal(PGconn *conn, const char *portalName);
char *PQencryptPassword(const char *passwd, const char *user);
#endif /* PG_BEFORE_080200 */
#ifdef PG_BEFORE_080100
Oid lo_create(PGconn *conn, Oid lobjId);
#endif /* PG_BEFORE_080100 */
#ifdef PG_BEFORE_080000
PGresult *PQprepare(PGconn *conn, const char *stmtName, const char *query,
int nParams, const Oid *paramTypes);
int PQsendPrepare(PGconn *conn, const char *stmtName, const char *query,
int nParams, const Oid *paramTypes);
int PQserverVersion(const PGconn* conn);
#endif /* PG_BEFORE_080000 */
#ifdef PG_BEFORE_070400
#define PG_DIAG_SEVERITY 'S'
#define PG_DIAG_SQLSTATE 'C'
#define PG_DIAG_MESSAGE_PRIMARY 'M'
#define PG_DIAG_MESSAGE_DETAIL 'D'
#define PG_DIAG_MESSAGE_HINT 'H'
#define PG_DIAG_STATEMENT_POSITION 'P'
#define PG_DIAG_CONTEXT 'W'
#define PG_DIAG_SOURCE_FILE 'F'
#define PG_DIAG_SOURCE_LINE 'L'
#define PG_DIAG_SOURCE_FUNCTION 'R'
#define PQfreemem(ptr) free(ptr)
#define PGNOTIFY_EXTRA(notify) ""
/* CONNECTION_SSL_STARTUP was added to an enum type
* after 7.3. For 7.3 in order to compile, we just need
* it to evaluate to something that is not present in that
* enum.
*/
#define CONNECTION_SSL_STARTUP 1000000
typedef void (*PQnoticeReceiver) (void *arg, const PGresult *res);
typedef enum
{
PQERRORS_TERSE, /* single-line error messages */
PQERRORS_DEFAULT, /* recommended style */
PQERRORS_VERBOSE /* all the facts, ma'am */
} PGVerbosity;
typedef enum
{
PQTRANS_IDLE, /* connection idle */
PQTRANS_ACTIVE, /* command in progress */
PQTRANS_INTRANS, /* idle, within transaction block */
PQTRANS_INERROR, /* idle, within failed transaction */
PQTRANS_UNKNOWN /* cannot determine status */
} PGTransactionStatusType;
PGresult *PQexecParams(PGconn *conn, const char *command, int nParams,
const Oid *paramTypes, const char * const * paramValues, const int *paramLengths,
const int *paramFormats, int resultFormat);
PGTransactionStatusType PQtransactionStatus(const PGconn *conn);
char *PQparameterStatus(const PGconn *conn, const char *paramName);
int PQprotocolVersion(const PGconn *conn);
PGresult *PQexecPrepared(PGconn *conn, const char *stmtName, int nParams,
const char * const *ParamValues, const int *paramLengths, const int *paramFormats,
int resultFormat);
int PQsendQueryParams(PGconn *conn, const char *command, int nParams,
const Oid *paramTypes, const char * const * paramValues, const int *paramLengths,
const int *paramFormats, int resultFormat);
int PQsendQueryPrepared(PGconn *conn, const char *stmtName, int nParams,
const char * const *ParamValues, const int *paramLengths, const int *paramFormats,
int resultFormat);
int PQputCopyData(PGconn *conn, const char *buffer, int nbytes);
int PQputCopyEnd(PGconn *conn, const char *errormsg);
int PQgetCopyData(PGconn *conn, char **buffer, int async);
PGVerbosity PQsetErrorVerbosity(PGconn *conn, PGVerbosity verbosity);
Oid PQftable(const PGresult *res, int column_number);
int PQftablecol(const PGresult *res, int column_number);
int PQfformat(const PGresult *res, int column_number);
char *PQresultErrorField(const PGresult *res, int fieldcode);
PQnoticeReceiver PQsetNoticeReceiver(PGconn *conn, PQnoticeReceiver proc, void *arg);
#else
#define PGNOTIFY_EXTRA(notify) ((notify)->extra)
#endif /* PG_BEFORE_070400 */
#ifdef PG_BEFORE_070300
#error unsupported postgresql version, requires 7.3 or later.
int PQsetClientEncoding(PGconn *conn, const char *encoding)
size_t PQescapeString(char *to, const char *from, size_t length);
unsigned char * PQescapeBytea(const unsigned char *bintext, size_t binlen, size_t *bytealen);
unsigned char * PQunescapeBytea(const unsigned char *strtext, size_t *retbuflen);
size_t PQescapeStringConn(PGconn *conn, char *to, const char *from,
size_t length, int *error);
unsigned char *PQescapeByteaConn(PGconn *conn, const unsigned char *from,
size_t from_length, size_t *to_length);
#endif /* PG_BEFORE_070300 */
#endif /* __compat_h */

View File

@ -4,7 +4,11 @@ require 'mkmf'
if ENV['MAINTAINER_MODE']
$stderr.puts "Maintainer mode enabled."
$CFLAGS << ' -Wall' << ' -ggdb' << ' -DDEBUG'
$CFLAGS <<
' -Wall' <<
' -ggdb' <<
' -DDEBUG' <<
' -pedantic'
end
if pgdir = with_config( 'pg' )
@ -28,6 +32,7 @@ if pgconfig = ( with_config('pg-config') || with_config('pg_config') || find_exe
$stderr.puts "Using config values from %s" % [ pgconfig ]
$CPPFLAGS << " -I%s" % [ `"#{pgconfig}" --includedir`.chomp ]
$LDFLAGS << " -L%s" % [ `"#{pgconfig}" --libdir`.chomp ]
$LIBS << " " << `"#{pgconfig}" --libs`.chomp
else
$stderr.puts "No pg_config... trying anyway. If building fails, please try again with",
" --with-pg-config=/path/to/pg_config"
@ -35,6 +40,7 @@ end
find_header( 'libpq-fe.h' ) or abort "Can't find the 'libpq-fe.h' header"
find_header( 'libpq/libpq-fs.h' ) or abort "Can't find the 'libpq/libpq-fs.h' header"
find_header( 'pg_config_manual.h' ) or abort "Can't find the 'pg_config_manual.h' header"
abort "Can't find the PostgreSQL client library (libpq)" unless
have_library( 'pq', 'PQconnectdb', ['libpq-fe.h'] ) ||
@ -51,6 +57,7 @@ have_func 'PQescapeStringConn'
have_func 'PQgetCancel'
have_func 'lo_create'
have_func 'pg_encoding_to_char'
have_func 'pg_char_to_encoding'
have_func 'PQsetClientEncoding'
have_func 'rb_encdb_alias'
@ -60,7 +67,7 @@ $defs.push( "-DHAVE_ST_NOTIFY_EXTRA" ) if
have_struct_member 'struct pgNotify', 'extra', 'libpq-fe.h'
# unistd.h conflicts with ruby/win32.h when cross compiling for win32 and ruby 1.9.1
have_header 'unistd.h' unless enable_config("static-build")
have_header 'unistd.h'
have_header 'ruby/st.h' or have_header 'st.h' or abort "pg currently requires the ruby/st.h header"
create_header()

4557
ext/pg.c

File diff suppressed because it is too large.

110
ext/pg.h

@ -1,58 +1,122 @@
#ifndef PG_H_C98VS4AD
#define PG_H_C98VS4AD
#ifndef __pg_h
#define __pg_h
#ifdef RUBY_EXTCONF_H
# include RUBY_EXTCONF_H
#endif
/* System headers */
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#ifdef RUBY_EXTCONF_H
#include RUBY_EXTCONF_H
#endif
#ifdef HAVE_UNISTD_H
#if defined(HAVE_UNISTD_H) && !defined(_WIN32)
# include <unistd.h>
#endif /* HAVE_UNISTD_H */
/* Ruby headers */
#include "ruby.h"
#ifdef HAVE_RUBY_ST_H
#include "ruby/st.h"
# include "ruby/st.h"
#elif HAVE_ST_H
#include "st.h"
# include "st.h"
#endif
#if defined(HAVE_RUBY_ENCODING_H) && HAVE_RUBY_ENCODING_H
# include "ruby/encoding.h"
# define M17N_SUPPORTED
# define ASSOCIATE_INDEX( obj, index_holder ) rb_enc_associate_index((obj), pg_enc_get_index((index_holder)))
# ifdef HAVE_RB_ENCDB_ALIAS
extern int rb_encdb_alias(const char *, const char *);
# define ENC_ALIAS(name, orig) rb_encdb_alias((name), (orig))
# elif HAVE_RB_ENC_ALIAS
extern int rb_enc_alias(const char *, const char *);
# define ENC_ALIAS(name, orig) rb_enc_alias((name), (orig))
# else
extern int rb_enc_alias(const char *alias, const char *orig); /* declaration missing in Ruby 1.9.1 */
# define ENC_ALIAS(name, orig) rb_enc_alias((name), (orig))
# endif
#else
# define ASSOCIATE_INDEX( obj, index_holder ) /* nothing */
#endif
#include "libpq-fe.h"
#include "libpq/libpq-fs.h" /* large-object interface */
#include "compat.h"
#if RUBY_VM != 1
#define RUBY_18_COMPAT
# define RUBY_18_COMPAT
#endif
#ifndef RARRAY_LEN
#define RARRAY_LEN(x) RARRAY((x))->len
# define RARRAY_LEN(x) RARRAY((x))->len
#endif /* RARRAY_LEN */
#ifndef RSTRING_LEN
#define RSTRING_LEN(x) RSTRING((x))->len
# define RSTRING_LEN(x) RSTRING((x))->len
#endif /* RSTRING_LEN */
#ifndef RSTRING_PTR
#define RSTRING_PTR(x) RSTRING((x))->ptr
# define RSTRING_PTR(x) RSTRING((x))->ptr
#endif /* RSTRING_PTR */
#ifndef StringValuePtr
#define StringValuePtr(x) STR2CSTR(x)
# define StringValuePtr(x) STR2CSTR(x)
#endif /* StringValuePtr */
#ifdef RUBY_18_COMPAT
#define rb_io_stdio_file GetWriteFile
#include "rubyio.h"
# define rb_io_stdio_file GetWriteFile
# include "rubyio.h"
#else
#include "ruby/io.h"
# include "ruby/io.h"
#endif
/* PostgreSQL headers */
#include "libpq-fe.h"
#include "libpq/libpq-fs.h" /* large-object interface */
#include "pg_config_manual.h"
#if defined(_WIN32)
# include <fcntl.h>
__declspec(dllexport)
#endif
void Init_pg_ext(void);
#endif /* end of include guard: PG_H_C98VS4AD */
/***************************************************************************
* Globals
**************************************************************************/
extern VALUE rb_mPG;
extern VALUE rb_ePGerror;
extern VALUE rb_mPGconstants;
extern VALUE rb_cPGconn;
extern VALUE rb_cPGresult;
/***************************************************************************
* MACROS
**************************************************************************/
#define UNUSED(x) ((void)(x))
#define SINGLETON_ALIAS(klass,new,old) rb_define_alias(rb_singleton_class((klass)),(new),(old))
/***************************************************************************
* PROTOTYPES
**************************************************************************/
void Init_pg_ext _(( void ));
void init_pg_connection _(( void ));
void init_pg_result _(( void ));
PGconn *pg_get_pgconn _(( VALUE ));
VALUE pg_new_result _(( PGresult *, PGconn * ));
void pg_check_result _(( VALUE, VALUE ));
VALUE pg_result_clear _(( VALUE ));
#ifdef M17N_SUPPORTED
rb_encoding * pg_get_pg_encoding_as_rb_encoding _(( int ));
rb_encoding * pg_get_pg_encname_as_rb_encoding _(( const char * ));
const char * pg_get_rb_encoding_as_pg_encoding _(( rb_encoding * ));
int pg_enc_get_index _(( VALUE ));
rb_encoding *pg_conn_enc_get _(( PGconn * ));
#endif /* M17N_SUPPORTED */
#endif /* end __pg_h */

3283
ext/pg_connection.c Normal file

File diff suppressed because it is too large.

905
ext/pg_result.c Normal file

@ -0,0 +1,905 @@
/*
* pg_result.c - PG::Result class extension
* $Id$
*
*/
#include "pg.h"
VALUE rb_cPGresult;
static void pgresult_gc_free( PGresult * );
static PGresult* pgresult_get( VALUE );
/*
* Global functions
*/
/*
* Result constructor
*/
VALUE
pg_new_result(PGresult *result, PGconn *conn)
{
VALUE val = Data_Wrap_Struct(rb_cPGresult, NULL, pgresult_gc_free, result);
#ifdef M17N_SUPPORTED
rb_encoding *enc = pg_conn_enc_get( conn );
rb_enc_set_index( val, rb_enc_to_index(enc) );
#endif
return val;
}
/*
* Raises appropriate exception if PGresult is
* in a bad state.
*/
void
pg_check_result(VALUE rb_pgconn, VALUE rb_pgresult)
{
VALUE error, exception;
PGconn *conn = pg_get_pgconn(rb_pgconn);
PGresult *result;
#ifdef M17N_SUPPORTED
rb_encoding *enc = pg_conn_enc_get( conn );
#endif
Data_Get_Struct(rb_pgresult, PGresult, result);
if(result == NULL)
{
error = rb_str_new2( PQerrorMessage(conn) );
}
else
{
switch (PQresultStatus(result))
{
case PGRES_TUPLES_OK:
case PGRES_COPY_OUT:
case PGRES_COPY_IN:
case PGRES_EMPTY_QUERY:
case PGRES_COMMAND_OK:
return;
case PGRES_BAD_RESPONSE:
case PGRES_FATAL_ERROR:
case PGRES_NONFATAL_ERROR:
error = rb_str_new2( PQresultErrorMessage(result) );
break;
default:
error = rb_str_new2( "internal error : unknown result status." );
}
}
#ifdef M17N_SUPPORTED
rb_enc_set_index( error, rb_enc_to_index(enc) );
#endif
exception = rb_exc_new3( rb_ePGerror, error );
rb_iv_set( exception, "@connection", rb_pgconn );
rb_iv_set( exception, "@result", rb_pgresult );
rb_exc_raise( exception );
return;
}
/*
* :TODO: This shouldn't be a global function, but it needs to be as long as pg_new_result
* doesn't handle blocks, check results, etc. Once connection and result are disentangled
* a bit more, I can make this a static pgresult_clear() again.
*/
/*
* call-seq:
* res.clear() -> nil
*
* Clears the PGresult object as the result of the query.
*/
VALUE
pg_result_clear(VALUE self)
{
PQclear(pgresult_get(self));
DATA_PTR(self) = NULL;
return Qnil;
}
/*
* DATA pointer functions
*/
/*
* GC Free function
*/
static void
pgresult_gc_free( PGresult *result )
{
if(result != NULL)
PQclear(result);
}
/*
* Fetch the data pointer for the result object
*/
static PGresult*
pgresult_get(VALUE self)
{
PGresult *result;
Data_Get_Struct(self, PGresult, result);
if (result == NULL) rb_raise(rb_ePGerror, "result has been cleared");
return result;
}
/********************************************************************
*
* Document-class: PGresult
*
* The class to represent the query result tuples (rows).
* An instance of this class is created as the result of every query.
* You may need to invoke the #clear method of the instance when finished with
* the result for better memory performance.
*
* Example:
* require 'pg'
* conn = PGconn.open(:dbname => 'test')
* res = conn.exec('SELECT 1 AS a, 2 AS b, NULL AS c')
* res.getvalue(0,0) # '1'
* res[0]['b'] # '2'
* res[0]['c'] # nil
*
*/
/**************************************************************************
* PGresult INSTANCE METHODS
**************************************************************************/
/*
* call-seq:
* res.result_status() -> Fixnum
*
* Returns the status of the query. The status value is one of:
* * +PGRES_EMPTY_QUERY+
* * +PGRES_COMMAND_OK+
* * +PGRES_TUPLES_OK+
* * +PGRES_COPY_OUT+
* * +PGRES_COPY_IN+
* * +PGRES_BAD_RESPONSE+
* * +PGRES_NONFATAL_ERROR+
* * +PGRES_FATAL_ERROR+
*/
static VALUE
pgresult_result_status(VALUE self)
{
return INT2FIX(PQresultStatus(pgresult_get(self)));
}
/*
* call-seq:
* res.res_status( status ) -> String
*
* Returns the string representation of status +status+.
*
*/
static VALUE
pgresult_res_status(VALUE self, VALUE status)
{
VALUE ret = rb_tainted_str_new2(PQresStatus(NUM2INT(status)));
ASSOCIATE_INDEX(ret, self);
return ret;
}
/*
* call-seq:
* res.error_message() -> String
*
* Returns the error message of the command as a string.
*/
static VALUE
pgresult_error_message(VALUE self)
{
VALUE ret = rb_tainted_str_new2(PQresultErrorMessage(pgresult_get(self)));
ASSOCIATE_INDEX(ret, self);
return ret;
}
/*
* call-seq:
* res.error_field(fieldcode) -> String
*
* Returns the individual field of an error.
*
* +fieldcode+ is one of:
* * +PG_DIAG_SEVERITY+
* * +PG_DIAG_SQLSTATE+
* * +PG_DIAG_MESSAGE_PRIMARY+
* * +PG_DIAG_MESSAGE_DETAIL+
* * +PG_DIAG_MESSAGE_HINT+
* * +PG_DIAG_STATEMENT_POSITION+
* * +PG_DIAG_INTERNAL_POSITION+
* * +PG_DIAG_INTERNAL_QUERY+
* * +PG_DIAG_CONTEXT+
* * +PG_DIAG_SOURCE_FILE+
* * +PG_DIAG_SOURCE_LINE+
* * +PG_DIAG_SOURCE_FUNCTION+
*
* An example:
*
* begin
* conn.exec( "SELECT * FROM nonexistant_table" )
* rescue PGError => err
* p [
* result.error_field( PGresult::PG_DIAG_SEVERITY ),
* result.error_field( PGresult::PG_DIAG_SQLSTATE ),
* result.error_field( PGresult::PG_DIAG_MESSAGE_PRIMARY ),
* result.error_field( PGresult::PG_DIAG_MESSAGE_DETAIL ),
* result.error_field( PGresult::PG_DIAG_MESSAGE_HINT ),
* result.error_field( PGresult::PG_DIAG_STATEMENT_POSITION ),
* result.error_field( PGresult::PG_DIAG_INTERNAL_POSITION ),
* result.error_field( PGresult::PG_DIAG_INTERNAL_QUERY ),
* result.error_field( PGresult::PG_DIAG_CONTEXT ),
* result.error_field( PGresult::PG_DIAG_SOURCE_FILE ),
* result.error_field( PGresult::PG_DIAG_SOURCE_LINE ),
* result.error_field( PGresult::PG_DIAG_SOURCE_FUNCTION ),
* ]
* end
*
* Outputs:
*
* ["ERROR", "42P01", "relation \"nonexistant_table\" does not exist", nil, nil,
* "15", nil, nil, nil, "path/to/parse_relation.c", "857", "parserOpenTable"]
*/
static VALUE
pgresult_error_field(VALUE self, VALUE field)
{
PGresult *result = pgresult_get( self );
int fieldcode = NUM2INT( field );
char * fieldstr = PQresultErrorField( result, fieldcode );
VALUE ret = Qnil;
if ( fieldstr ) {
ret = rb_tainted_str_new2( fieldstr );
ASSOCIATE_INDEX( ret, self );
}
return ret;
}
/*
* call-seq:
* res.ntuples() -> Fixnum
*
* Returns the number of tuples in the query result.
*/
static VALUE
pgresult_ntuples(VALUE self)
{
return INT2FIX(PQntuples(pgresult_get(self)));
}
/*
* call-seq:
* res.nfields() -> Fixnum
*
* Returns the number of columns in the query result.
*/
static VALUE
pgresult_nfields(VALUE self)
{
return INT2NUM(PQnfields(pgresult_get(self)));
}
/*
* call-seq:
* res.fname( index ) -> String
*
* Returns the name of the column corresponding to _index_.
*/
static VALUE
pgresult_fname(VALUE self, VALUE index)
{
VALUE fname;
PGresult *result;
int i = NUM2INT(index);
result = pgresult_get(self);
if (i < 0 || i >= PQnfields(result)) {
rb_raise(rb_eArgError,"invalid field number %d", i);
}
fname = rb_tainted_str_new2(PQfname(result, i));
ASSOCIATE_INDEX(fname, self);
return fname;
}
/*
* call-seq:
* res.fnumber( name ) -> Fixnum
*
* Returns the index of the field specified by the string _name_.
*
* Raises an ArgumentError if the specified _name_ isn't one of the field names;
* raises a TypeError if _name_ is not a String.
*/
static VALUE
pgresult_fnumber(VALUE self, VALUE name)
{
int n;
Check_Type(name, T_STRING);
n = PQfnumber(pgresult_get(self), StringValuePtr(name));
if (n == -1) {
rb_raise(rb_eArgError,"Unknown field: %s", StringValuePtr(name));
}
return INT2FIX(n);
}
/*
* call-seq:
* res.ftable( column_number ) -> Fixnum
*
* Returns the Oid of the table from which the column _column_number_
* was fetched.
*
* Raises ArgumentError if _column_number_ is out of range or if
* the Oid is undefined for that column.
*/
static VALUE
pgresult_ftable(VALUE self, VALUE column_number)
{
Oid n ;
int col_number = NUM2INT(column_number);
PGresult *pgresult = pgresult_get(self);
if( col_number < 0 || col_number >= PQnfields(pgresult))
rb_raise(rb_eArgError,"Invalid column index: %d", col_number);
n = PQftable(pgresult, col_number);
return INT2FIX(n);
}
/*
* call-seq:
* res.ftablecol( column_number ) -> Fixnum
*
* Returns the column number (within its table) of the table from
* which the column _column_number_ is made up.
*
* Raises ArgumentError if _column_number_ is out of range or if
* the column number from its table is undefined for that column.
*/
static VALUE
pgresult_ftablecol(VALUE self, VALUE column_number)
{
int col_number = NUM2INT(column_number);
PGresult *pgresult = pgresult_get(self);
int n;
if( col_number < 0 || col_number >= PQnfields(pgresult))
rb_raise(rb_eArgError,"Invalid column index: %d", col_number);
n = PQftablecol(pgresult, col_number);
return INT2FIX(n);
}
/*
* call-seq:
* res.fformat( column_number ) -> Fixnum
*
* Returns the format (0 for text, 1 for binary) of column
* _column_number_.
*
* Raises ArgumentError if _column_number_ is out of range.
*/
static VALUE
pgresult_fformat(VALUE self, VALUE column_number)
{
PGresult *result = pgresult_get(self);
int fnumber = NUM2INT(column_number);
if (fnumber < 0 || fnumber >= PQnfields(result)) {
rb_raise(rb_eArgError, "Column number is out of range: %d",
fnumber);
}
return INT2FIX(PQfformat(result, fnumber));
}
/*
* call-seq:
* res.ftype( column_number )
*
* Returns the data type associated with _column_number_.
*
* The integer returned is the internal +OID+ number (in PostgreSQL)
* of the type. To get a human-readable value for the type, use the
* returned OID and the field's #fmod value with the format_type() SQL
* function:
*
* # Get the type of the second column of the result 'res'
* typename = conn.
* exec( "SELECT format_type($1,$2)", [res.ftype(1), res.fmod(1)] ).
* getvalue( 0, 0 )
*
* Raises an ArgumentError if _column_number_ is out of range.
*/
static VALUE
pgresult_ftype(VALUE self, VALUE index)
{
PGresult* result = pgresult_get(self);
int i = NUM2INT(index);
if (i < 0 || i >= PQnfields(result)) {
rb_raise(rb_eArgError, "invalid field number %d", i);
}
return INT2NUM(PQftype(result, i));
}
/*
* call-seq:
* res.fmod( column_number )
*
* Returns the type modifier associated with column _column_number_. See
* the #ftype method for an example of how to use this.
*
* Raises an ArgumentError if _column_number_ is out of range.
*/
static VALUE
pgresult_fmod(VALUE self, VALUE column_number)
{
PGresult *result = pgresult_get(self);
int fnumber = NUM2INT(column_number);
int modifier;
if (fnumber < 0 || fnumber >= PQnfields(result)) {
rb_raise(rb_eArgError, "Column number is out of range: %d",
fnumber);
}
modifier = PQfmod(result,fnumber);
return INT2NUM(modifier);
}
/*
* call-seq:
* res.fsize( index )
*
* Returns the size of the field type in bytes. Returns <tt>-1</tt> if the field is variable sized.
*
* res = conn.exec("SELECT myInt, myVarChar50 FROM foo")
* res.fsize(0) => 4
* res.fsize(1) => -1
*/
static VALUE
pgresult_fsize(VALUE self, VALUE index)
{
PGresult *result;
int i = NUM2INT(index);
result = pgresult_get(self);
if (i < 0 || i >= PQnfields(result)) {
rb_raise(rb_eArgError,"invalid field number %d", i);
}
return INT2NUM(PQfsize(result, i));
}
/*
* call-seq:
* res.getvalue( tup_num, field_num )
*
* Returns the value in tuple number _tup_num_, field _field_num_,
* or +nil+ if the field is +NULL+.
*/
static VALUE
pgresult_getvalue(VALUE self, VALUE tup_num, VALUE field_num)
{
VALUE ret;
PGresult *result;
int i = NUM2INT(tup_num);
int j = NUM2INT(field_num);
result = pgresult_get(self);
if(i < 0 || i >= PQntuples(result)) {
rb_raise(rb_eArgError,"invalid tuple number %d", i);
}
if(j < 0 || j >= PQnfields(result)) {
rb_raise(rb_eArgError,"invalid field number %d", j);
}
if(PQgetisnull(result, i, j))
return Qnil;
ret = rb_tainted_str_new(PQgetvalue(result, i, j),
PQgetlength(result, i, j));
ASSOCIATE_INDEX(ret, self);
return ret;
}
/*
* call-seq:
* res.getisnull(tuple_position, field_position) -> boolean
*
* Returns +true+ if the specified value is +nil+; +false+ otherwise.
*/
static VALUE
pgresult_getisnull(VALUE self, VALUE tup_num, VALUE field_num)
{
PGresult *result;
int i = NUM2INT(tup_num);
int j = NUM2INT(field_num);
result = pgresult_get(self);
if (i < 0 || i >= PQntuples(result)) {
rb_raise(rb_eArgError,"invalid tuple number %d", i);
}
if (j < 0 || j >= PQnfields(result)) {
rb_raise(rb_eArgError,"invalid field number %d", j);
}
return PQgetisnull(result, i, j) ? Qtrue : Qfalse;
}
/*
* call-seq:
* res.getlength( tup_num, field_num ) -> Fixnum
*
* Returns the (String) length of the field in bytes.
*
* Equivalent to <tt>res.getvalue(<i>tup_num</i>,<i>field_num</i>).length</tt>.
*/
static VALUE
pgresult_getlength(VALUE self, VALUE tup_num, VALUE field_num)
{
PGresult *result;
int i = NUM2INT(tup_num);
int j = NUM2INT(field_num);
result = pgresult_get(self);
if (i < 0 || i >= PQntuples(result)) {
rb_raise(rb_eArgError,"invalid tuple number %d", i);
}
if (j < 0 || j >= PQnfields(result)) {
rb_raise(rb_eArgError,"invalid field number %d", j);
}
return INT2FIX(PQgetlength(result, i, j));
}
/*
* call-seq:
* res.nparams() -> Fixnum
*
* Returns the number of parameters of a prepared statement.
* Only useful for the result returned by conn.describePrepared
*/
static VALUE
pgresult_nparams(VALUE self)
{
PGresult *result;
result = pgresult_get(self);
return INT2FIX(PQnparams(result));
}
/*
* call-seq:
* res.paramtype( param_number ) -> Oid
*
* Returns the Oid of the data type of parameter _param_number_.
* Only useful for the result returned by conn.describePrepared
*/
static VALUE
pgresult_paramtype(VALUE self, VALUE param_number)
{
PGresult *result;
result = pgresult_get(self);
return INT2FIX(PQparamtype(result,NUM2INT(param_number)));
}
/*
* call-seq:
* res.cmd_status() -> String
*
* Returns the status string of the last query command.
*/
static VALUE
pgresult_cmd_status(VALUE self)
{
VALUE ret = rb_tainted_str_new2(PQcmdStatus(pgresult_get(self)));
ASSOCIATE_INDEX(ret, self);
return ret;
}
/*
* call-seq:
* res.cmd_tuples() -> Fixnum
*
* Returns the number of tuples (rows) affected by the SQL command.
*
* If the SQL command that generated the PGresult was not one of:
* * +INSERT+
* * +UPDATE+
* * +DELETE+
* * +MOVE+
* * +FETCH+
* or if no tuples were affected, <tt>0</tt> is returned.
*/
static VALUE
pgresult_cmd_tuples(VALUE self)
{
long n;
n = strtol(PQcmdTuples(pgresult_get(self)),NULL, 10);
return INT2NUM(n);
}
/*
* call-seq:
* res.oid_value() -> Fixnum
*
* Returns the +oid+ of the inserted row if applicable,
* otherwise +nil+.
*/
static VALUE
pgresult_oid_value(VALUE self)
{
Oid n = PQoidValue(pgresult_get(self));
if (n == InvalidOid)
return Qnil;
else
return INT2FIX(n);
}
/* Utility methods not in libpq */
/*
* call-seq:
* res[ n ] -> Hash
*
* Returns tuple _n_ as a hash.
*/
static VALUE
pgresult_aref(VALUE self, VALUE index)
{
PGresult *result = pgresult_get(self);
int tuple_num = NUM2INT(index);
int field_num;
VALUE fname,val;
VALUE tuple;
if ( tuple_num < 0 || tuple_num >= PQntuples(result) )
rb_raise( rb_eIndexError, "Index %d is out of range", tuple_num );
tuple = rb_hash_new();
for ( field_num = 0; field_num < PQnfields(result); field_num++ ) {
fname = rb_tainted_str_new2( PQfname(result,field_num) );
ASSOCIATE_INDEX(fname, self);
if ( PQgetisnull(result, tuple_num, field_num) ) {
rb_hash_aset( tuple, fname, Qnil );
}
else {
val = rb_tainted_str_new( PQgetvalue(result, tuple_num, field_num ),
PQgetlength(result, tuple_num, field_num) );
#ifdef M17N_SUPPORTED
/* associate client encoding for text format only */
if ( 0 == PQfformat(result, field_num) ) {
ASSOCIATE_INDEX( val, self );
} else {
rb_enc_associate( val, rb_ascii8bit_encoding() );
}
#endif
rb_hash_aset( tuple, fname, val );
}
}
return tuple;
}
/*
* call-seq:
* res.values -> Array
*
* Returns all tuples as an array of arrays.
*/
static VALUE
pgresult_values(VALUE self)
{
PGresult* result = (PGresult*) pgresult_get(self);
int row;
int field;
int num_rows = PQntuples(result);
int num_fields = PQnfields(result);
VALUE ary = rb_ary_new2(num_rows);
for ( row = 0; row < num_rows; row++ ) {
/* create new row */
VALUE new_row = rb_ary_new2(num_fields);
/* add to return array */
rb_ary_store( ary, row, new_row );
/* populate it */
for ( field = 0; field < num_fields; field++ ) {
if ( PQgetisnull(result, row, field) ) {
rb_ary_store( new_row, field, Qnil );
}
else {
VALUE val = rb_tainted_str_new( PQgetvalue(result, row, field),
PQgetlength(result, row, field) );
#ifdef M17N_SUPPORTED
/* associate client encoding for text format only */
if ( 0 == PQfformat(result, field) ) {
ASSOCIATE_INDEX( val, self );
} else {
rb_enc_associate( val, rb_ascii8bit_encoding() );
}
#endif
rb_ary_store( new_row, field, val );
}
}
}
return ary;
}
/*
* Make a Ruby array out of the encoded values from the specified
* column in the given result.
*/
static VALUE
make_column_result_array( VALUE self, int col )
{
PGresult *result = pgresult_get( self );
int row = PQntuples( result );
VALUE ary = rb_ary_new2( row );
VALUE val = Qnil;
if ( col >= PQnfields(result) )
rb_raise( rb_eIndexError, "no column %d in result", col );
while ( row-- ) {
val = rb_tainted_str_new( PQgetvalue(result, row, col),
PQgetlength(result, row, col) );
#ifdef M17N_SUPPORTED
/* associate client encoding for text format only */
if ( 0 == PQfformat(result, col) ) {
ASSOCIATE_INDEX( val, self );
} else {
rb_enc_associate( val, rb_ascii8bit_encoding() );
}
#endif
rb_ary_store( ary, row, val );
}
return ary;
}
/*
* call-seq:
* res.column_values( n ) -> array
*
* Returns an Array of the values from the nth column of each
* tuple in the result.
*
*/
static VALUE
pgresult_column_values(VALUE self, VALUE index)
{
int col = NUM2INT( index );
return make_column_result_array( self, col );
}
/*
* call-seq:
* res.field_values( field ) -> array
*
* Returns an Array of the values from the given _field_ of each tuple in the result.
*
*/
static VALUE
pgresult_field_values( VALUE self, VALUE field )
{
PGresult *result = pgresult_get( self );
const char *fieldname = RSTRING_PTR( field );
int fnum = PQfnumber( result, fieldname );
if ( fnum < 0 )
rb_raise( rb_eIndexError, "no such field '%s' in result", fieldname );
return make_column_result_array( self, fnum );
}
/*
* call-seq:
* res.each{ |tuple| ... }
*
* Invokes block for each tuple in the result set.
*/
static VALUE
pgresult_each(VALUE self)
{
PGresult *result = pgresult_get(self);
int tuple_num;
for(tuple_num = 0; tuple_num < PQntuples(result); tuple_num++) {
rb_yield(pgresult_aref(self, INT2NUM(tuple_num)));
}
return self;
}
/*
* call-seq:
* res.fields() -> Array
*
* Returns an array of Strings representing the names of the fields in the result.
*/
static VALUE
pgresult_fields(VALUE self)
{
PGresult *result;
VALUE ary;
int n, i;
result = pgresult_get(self);
n = PQnfields(result);
ary = rb_ary_new2(n);
for (i=0;i<n;i++) {
VALUE val = rb_tainted_str_new2(PQfname(result, i));
ASSOCIATE_INDEX(val, self);
rb_ary_push(ary, val);
}
return ary;
}
void
init_pg_result()
{
rb_cPGresult = rb_define_class_under( rb_mPG, "Result", rb_cObject );
rb_include_module(rb_cPGresult, rb_mEnumerable);
rb_include_module(rb_cPGresult, rb_mPGconstants);
/****** PG::Result INSTANCE METHODS: libpq ******/
rb_define_method(rb_cPGresult, "result_status", pgresult_result_status, 0);
rb_define_method(rb_cPGresult, "res_status", pgresult_res_status, 1);
rb_define_method(rb_cPGresult, "error_message", pgresult_error_message, 0);
rb_define_alias( rb_cPGresult, "result_error_message", "error_message");
rb_define_method(rb_cPGresult, "error_field", pgresult_error_field, 1);
rb_define_alias( rb_cPGresult, "result_error_field", "error_field" );
rb_define_method(rb_cPGresult, "clear", pg_result_clear, 0);
rb_define_method(rb_cPGresult, "ntuples", pgresult_ntuples, 0);
rb_define_alias(rb_cPGresult, "num_tuples", "ntuples");
rb_define_method(rb_cPGresult, "nfields", pgresult_nfields, 0);
rb_define_alias(rb_cPGresult, "num_fields", "nfields");
rb_define_method(rb_cPGresult, "fname", pgresult_fname, 1);
rb_define_method(rb_cPGresult, "fnumber", pgresult_fnumber, 1);
rb_define_method(rb_cPGresult, "ftable", pgresult_ftable, 1);
rb_define_method(rb_cPGresult, "ftablecol", pgresult_ftablecol, 1);
rb_define_method(rb_cPGresult, "fformat", pgresult_fformat, 1);
rb_define_method(rb_cPGresult, "ftype", pgresult_ftype, 1);
rb_define_method(rb_cPGresult, "fmod", pgresult_fmod, 1);
rb_define_method(rb_cPGresult, "fsize", pgresult_fsize, 1);
rb_define_method(rb_cPGresult, "getvalue", pgresult_getvalue, 2);
rb_define_method(rb_cPGresult, "getisnull", pgresult_getisnull, 2);
rb_define_method(rb_cPGresult, "getlength", pgresult_getlength, 2);
rb_define_method(rb_cPGresult, "nparams", pgresult_nparams, 0);
rb_define_method(rb_cPGresult, "paramtype", pgresult_paramtype, 1);
rb_define_method(rb_cPGresult, "cmd_status", pgresult_cmd_status, 0);
rb_define_method(rb_cPGresult, "cmd_tuples", pgresult_cmd_tuples, 0);
rb_define_alias(rb_cPGresult, "cmdtuples", "cmd_tuples");
rb_define_method(rb_cPGresult, "oid_value", pgresult_oid_value, 0);
/****** PG::Result INSTANCE METHODS: other ******/
rb_define_method(rb_cPGresult, "[]", pgresult_aref, 1);
rb_define_method(rb_cPGresult, "each", pgresult_each, 0);
rb_define_method(rb_cPGresult, "fields", pgresult_fields, 0);
rb_define_method(rb_cPGresult, "values", pgresult_values, 0);
rb_define_method(rb_cPGresult, "column_values", pgresult_column_values, 1);
rb_define_method(rb_cPGresult, "field_values", pgresult_field_values, 1);
}
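Since init_pg_result registers the class as PG::Result and mixes in Enumerable, result objects can be iterated directly. A small sketch, assuming an open connection +conn+ (generate_series is a stock PostgreSQL function; values are returned as Strings):

res = conn.exec( 'SELECT generate_series(1,3) AS n' )
res.class                    # => PG::Result
res.map {|row| row['n'] }    # => ["1", "2", "3"]   (via #each from Enumerable)
res.clear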

View File

@ -14,56 +14,38 @@ rescue LoadError
end
#--
# The PG connection class.
class PGconn
# The order the options are passed to the ::connect method.
CONNECT_ARGUMENT_ORDER = %w[host port options tty dbname user password]
# The top-level PG namespace.
module PG
# Library version
VERSION = '0.13.0'
# VCS revision
REVISION = %q$Revision$
### Quote the given +value+ for use in a connection-parameter string.
def self::quote_connstr( value )
return "'" + value.to_s.gsub( /[\\']/ ) {|m| '\\' + m } + "'"
### Get the PG library version. If +include_buildnum+ is +true+, include the build ID.
def self::version_string( include_buildnum=false )
vstring = "%s %s" % [ self.name, VERSION ]
vstring << " (build %s)" % [ REVISION[/: ([[:xdigit:]]+)/, 1] || '0' ] if include_buildnum
return vstring
end
### Parse the connection +args+ into a connection-parameter string. See PGconn.new
### for valid arguments.
def self::parse_connect_args( *args )
return '' if args.empty?
# This will be swapped soon for code that makes options like those required for
# PQconnectdbParams()/PQconnectStartParams(). For now, stick to an options string for
# PQconnectdb()/PQconnectStart().
connopts = []
# Handle an options hash first
if args.last.is_a?( Hash )
opthash = args.pop
opthash.each do |key, val|
connopts.push( "%s=%s" % [key, PGconn.quote_connstr(val)] )
end
end
# Option string style
if args.length == 1 && args.first.to_s.index( '=' )
connopts.unshift( args.first )
# Append positional parameters
else
args.each_with_index do |val, i|
next unless val # Skip nil placeholders
key = CONNECT_ARGUMENT_ORDER[ i ] or
raise ArgumentError, "Extra positional parameter %d: %p" % [ i+1, val ]
connopts.push( "%s=%s" % [key, PGconn.quote_connstr(val.to_s)] )
end
end
return connopts.join(' ')
### Convenience alias for PG::Connection.new.
def self::connect( *args )
return PG::Connection.new( *args )
end
end # class PGconn
require 'pg/exceptions'
require 'pg/connection'
require 'pg/result'
end # module PG
# Backward-compatible alias
PGError = PG::Error
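A quick sketch of the module-level helpers defined above (return values shown for illustration; the build number in ::version_string only appears once the $Revision$ keyword is expanded):

require 'pg'

PG::VERSION                # => "0.13.0"
PG.version_string          # => "PG 0.13.0"
PG.version_string( true )  # => "PG 0.13.0 (build <revision>)"
conn = PG.connect( :dbname => 'test' )   # convenience alias for PG::Connection.new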

58
lib/pg/connection.rb Normal file

@ -0,0 +1,58 @@
#!/usr/bin/env ruby
require 'pg' unless defined?( PG )
# The PG connection class.
class PG::Connection
# The order the options are passed to the ::connect method.
CONNECT_ARGUMENT_ORDER = %w[host port options tty dbname user password]
### Quote the given +value+ for use in a connection-parameter string.
def self::quote_connstr( value )
return "'" + value.to_s.gsub( /[\\']/ ) {|m| '\\' + m } + "'"
end
### Parse the connection +args+ into a connection-parameter string. See PG::Connection.new
### for valid arguments.
def self::parse_connect_args( *args )
return '' if args.empty?
# This will be swapped soon for code that makes options like those required for
# PQconnectdbParams()/PQconnectStartParams(). For now, stick to an options string for
# PQconnectdb()/PQconnectStart().
connopts = []
# Handle an options hash first
if args.last.is_a?( Hash )
opthash = args.pop
opthash.each do |key, val|
connopts.push( "%s=%s" % [key, PG::Connection.quote_connstr(val)] )
end
end
# Option string style
if args.length == 1 && args.first.to_s.index( '=' )
connopts.unshift( args.first )
# Append positional parameters
else
args.each_with_index do |val, i|
next unless val # Skip nil placeholders
key = CONNECT_ARGUMENT_ORDER[ i ] or
raise ArgumentError, "Extra positional parameter %d: %p" % [ i+1, val ]
connopts.push( "%s=%s" % [key, PG::Connection.quote_connstr(val.to_s)] )
end
end
return connopts.join(' ')
end
end # class PG::Connection
# Backward-compatible alias
PGconn = PG::Connection
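A sketch of how the three accepted argument styles collapse into a single connection-parameter string (hash key order may differ on Ruby 1.8):

PG::Connection.parse_connect_args( 'dbname=test' )
# => "dbname=test"

PG::Connection.parse_connect_args( 'localhost', 5432, nil, nil, 'test' )
# => "host='localhost' port='5432' dbname='test'"

PG::Connection.parse_connect_args( :dbname => 'test', :user => 'bob' )
# => "dbname='test' user='bob'"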

11
lib/pg/constants.rb Normal file

@ -0,0 +1,11 @@
#!/usr/bin/env ruby
require 'pg' unless defined?( PG )
module PG::Constants
# Most of these are defined in the extension.
end # module PG::Constants

11
lib/pg/exceptions.rb Normal file

@ -0,0 +1,11 @@
#!/usr/bin/env ruby
require 'pg' unless defined?( PG )
module PG
class Error < StandardError; end
end # module PG
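Because PGError is aliased to PG::Error at the bottom of lib/pg.rb, existing rescue clauses keep working. A sketch, assuming an open connection +conn+ and a table name that does not exist:

begin
  conn.exec( 'SELECT * FROM no_such_table' )
rescue PG::Error => err    # code written against PGError is caught here too
  $stderr.puts err.message
end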

11
lib/pg/result.rb Normal file

@ -0,0 +1,11 @@
#!/usr/bin/env ruby
require 'pg' unless defined?( PG )
class PG::Result
end # class PG::Result
# Backward-compatible alias
PGresult = PG::Result

View File

@ -8,7 +8,7 @@ SOCKHOST = 'it-trac.laika.com'
# Load pg first, so the libssl.so that libpq is linked against is loaded.
require 'pg'
$stderr.puts "connecting to postgres://#{PGHOST}/#{PGDB}"
conn = PGconn.connect( PGHOST, :dbname => PGDB )
conn = PG.connect( PGHOST, :dbname => PGDB )
# Now load OpenSSL, which might be linked against a different libssl.
require 'socket'

View File

@ -4,7 +4,7 @@ require 'pg'
# This is an example of how to use the asynchronous API to query the
# server without blocking other threads. It's intentionally low-level;
# if you hooked up the PGconn#socket to some kind of reactor, you
# if you hooked up the PG::Connection#socket to some kind of reactor, you
# could make this much nicer.
TIMEOUT = 5.0 # seconds to wait for an async operation to complete
@ -28,9 +28,9 @@ end
# Start the connection
output_progress "Starting connection..."
conn = PGconn.connect_start( CONN_OPTS ) or abort "Unable to create a new connection!"
conn = PG::Connection.connect_start( CONN_OPTS ) or abort "Unable to create a new connection!"
abort "Connection failed: %s" % [ conn.error_message ] if
conn.status == PGconn::CONNECTION_BAD
conn.status == PG::CONNECTION_BAD
# Now grab a reference to the underlying socket so we know when the
# connection is established
@ -38,19 +38,19 @@ socket = IO.for_fd( conn.socket )
# Track the progress of the connection, waiting for the socket to become readable/writable
# before polling it
poll_status = PGconn::PGRES_POLLING_WRITING
until poll_status == PGconn::PGRES_POLLING_OK ||
poll_status == PGconn::PGRES_POLLING_FAILED
poll_status = PG::PGRES_POLLING_WRITING
until poll_status == PG::PGRES_POLLING_OK ||
poll_status == PG::PGRES_POLLING_FAILED
# If the socket needs to read, wait 'til it becomes readable to poll again
case poll_status
when PGconn::PGRES_POLLING_READING
when PG::PGRES_POLLING_READING
output_progress " waiting for socket to become readable"
select( [socket], nil, nil, TIMEOUT ) or
raise "Asynchronous connection timed out!"
# ...and the same for when the socket needs to write
when PGconn::PGRES_POLLING_WRITING
when PG::PGRES_POLLING_WRITING
output_progress " waiting for socket to become writable"
select( nil, [socket], nil, TIMEOUT ) or
raise "Asynchronous connection timed out!"
@ -58,17 +58,17 @@ until poll_status == PGconn::PGRES_POLLING_OK ||
# Output a status message about the progress
case conn.status
when PGconn::CONNECTION_STARTED
when PG::CONNECTION_STARTED
output_progress " waiting for connection to be made."
when PGconn::CONNECTION_MADE
when PG::CONNECTION_MADE
output_progress " connection OK; waiting to send."
when PGconn::CONNECTION_AWAITING_RESPONSE
when PG::CONNECTION_AWAITING_RESPONSE
output_progress " waiting for a response from the server."
when PGconn::CONNECTION_AUTH_OK
when PG::CONNECTION_AUTH_OK
output_progress " received authentication; waiting for backend start-up to finish."
when PGconn::CONNECTION_SSL_STARTUP
when PG::CONNECTION_SSL_STARTUP
output_progress " negotiating SSL encryption."
when PGconn::CONNECTION_SETENV
when PG::CONNECTION_SETENV
output_progress " negotiating environment-driven parameter settings."
end
@ -76,7 +76,7 @@ until poll_status == PGconn::PGRES_POLLING_OK ||
poll_status = conn.connect_poll
end
abort "Connect failed: %s" % [ conn.error_message ] unless conn.status == PGconn::CONNECTION_OK
abort "Connect failed: %s" % [ conn.error_message ] unless conn.status == PG::CONNECTION_OK
output_progress "Sending query"
conn.send_query( "SELECT * FROM pg_stat_activity" )

View File

@ -6,7 +6,7 @@ require 'stringio'
# Using COPY asynchronously
$stderr.puts "Opening database connection ..."
conn = PGconn.connect( :dbname => 'test' )
conn = PG.connect( :dbname => 'test' )
conn.setnonblocking( true )
socket = IO.for_fd( conn.socket )

View File

@ -21,9 +21,9 @@ end
# Start the (synchronous) connection
output_progress "Starting connection..."
conn = PGconn.connect( CONN_OPTS ) or abort "Unable to create a new connection!"
conn = PG.connect( CONN_OPTS ) or abort "Unable to create a new connection!"
abort "Connect failed: %s" % [ conn.error_message ] unless conn.status == PGconn::CONNECTION_OK
abort "Connect failed: %s" % [ conn.error_message ] unless conn.status == PG::CONNECTION_OK
# Now grab a reference to the underlying socket to select() on while the query is running
socket = IO.for_fd( conn.socket )

View File

@ -4,7 +4,7 @@ require 'pg'
require 'stringio'
$stderr.puts "Opening database connection ..."
conn = PGconn.connect( :dbname => 'test' )
conn = PG.connect( :dbname => 'test' )
conn.exec( <<END_SQL )
DROP TABLE IF EXISTS logs;

View File

@ -6,7 +6,7 @@ require 'stringio'
# An example of how to stream data to your local host from the database as CSV.
$stderr.puts "Opening database connection ..."
conn = PGconn.connect( :dbname => 'test' )
conn = PG.connect( :dbname => 'test' )
$stderr.puts "Running COPY command ..."
buf = ''

View File

@ -6,7 +6,7 @@ require 'pg'
# the cursor portion of testlibpq.c from src/test/examples.
$stderr.puts "Opening database connection ..."
conn = PGconn.connect( :dbname => 'test' )
conn = PG.connect( :dbname => 'test' )
#
conn.transaction do

View File

@ -5,7 +5,7 @@ require 'pg'
SAMPLE_WRITE_DATA = 'some sample data'
SAMPLE_EXPORT_NAME = 'lowrite.txt'
conn = PGconn.connect( :dbname => 'test', :host => 'localhost', :port => 5432 )
conn = PG.connect( :dbname => 'test', :host => 'localhost', :port => 5432 )
puts "dbname: " + conn.db + "\thost: " + conn.host + "\tuser: " + conn.user
# Start a transaction, as all large object functions require one.
@ -20,15 +20,15 @@ puts " imported as large object %d" % [ oid ]
# Read back 50 bytes of the imported data
puts "Read test:"
fd = conn.lo_open( oid, PGconn::INV_READ|PGconn::INV_WRITE )
conn.lo_lseek( fd, 0, PGconn::SEEK_SET )
fd = conn.lo_open( oid, PG::INV_READ|PG::INV_WRITE )
conn.lo_lseek( fd, 0, PG::SEEK_SET )
buf = conn.lo_read( fd, 50 )
puts " read: %p" % [ buf ]
puts " read was ok!" if buf =~ /require 'pg'/
# Append some test data onto the end of the object
puts "Write test:"
conn.lo_lseek( fd, 0, PGconn::SEEK_END )
conn.lo_lseek( fd, 0, PG::SEEK_END )
buf = SAMPLE_WRITE_DATA.dup
totalbytes = 0
until buf.empty?
@ -53,9 +53,9 @@ puts 'Testing read and delete from a new transaction:'
puts ' starting a new transaction'
conn.exec( 'BEGIN' )
fd = conn.lo_open( oid, PGconn::INV_READ )
fd = conn.lo_open( oid, PG::INV_READ )
puts ' reopened okay.'
conn.lo_lseek( fd, 50, PGconn::SEEK_END )
conn.lo_lseek( fd, 50, PG::SEEK_END )
buf = conn.lo_read( fd, 50 )
puts ' read okay.' if buf == SAMPLE_WRITE_DATA
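For reference, the large-object calls in this sample combine into something like the following minimal sketch; it assumes a reachable 'test' database and is not part of the committed file.

require 'pg'

conn = PG.connect( :dbname => 'test' )

# All large-object operations must run inside a transaction.
conn.transaction do
    oid = conn.lo_create( 0 )
    fd  = conn.lo_open( oid, PG::INV_READ | PG::INV_WRITE )

    conn.lo_write( fd, "some sample data" )
    conn.lo_lseek( fd, 0, PG::SEEK_SET )   # rewind before reading back
    puts conn.lo_read( fd, 50 )

    conn.lo_close( fd )
    conn.lo_unlink( oid )                  # discard the scratch object
end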

View File

@ -31,7 +31,7 @@ BEGIN {
require 'pg'
conn = PGconn.connect( :dbname => 'test' )
conn = PG.connect( :dbname => 'test' )
conn.exec( 'LISTEN woo' ) # register interest in the 'woo' event
puts "Waiting up to 30 seconds for for an event!"

View File

@ -141,8 +141,8 @@ def PSQLexec(ps, query)
printf(STDERR, "%s\n", ps.db.error())
else
if (res.status() == PGresult::COMMAND_OK ||
res.status() == PGresult::TUPLES_OK)
if (res.status() == PG::COMMAND_OK ||
res.status() == PG::TUPLES_OK)
return res
end
@ -462,13 +462,13 @@ def do_connect(settings, new_dbname)
begin
printf("closing connection to database: %s\n", dbname);
settings.db = PGconn.connect(olddb.host, olddb.port, "", "", new_dbname)
settings.db = PG.connect(olddb.host, olddb.port, "", "", new_dbname)
printf("connecting to new database: %s\n", new_dbname)
olddb.finish()
rescue
printf(STDERR, "%s\n", $!)
printf("reconnecting to %s\n", dbname)
settings.db = PGconn.connect(olddb.host, olddb.port,"", "", dbname)
settings.db = PG.connect(olddb.host, olddb.port,"", "", dbname)
ensure
settings.prompt = settings.db.db + PROMPT
end
@ -832,7 +832,7 @@ def SendQuery(settings, query, copy_in, copy_out, copystream)
begin
results = settings.db.exec(query)
case results.status
when PGresult::TUPLES_OK
when PG::TUPLES_OK
success = TRUE
if settings.gfname
setFout(settings, settings.gfname)
@ -849,16 +849,16 @@ def SendQuery(settings, query, copy_in, copy_out, copystream)
end
results.clear
when PGresult::EMPTY_QUERY
when PG::EMPTY_QUERY
success = TRUE
when PGresult::COMMAND_OK
when PG::COMMAND_OK
success = TRUE
if !settings.quiet
printf("%s\n", results.cmdstatus)
end
when PGresult::COPY_OUT
when PG::COPY_OUT
success = TRUE
if copy_out
handleCopyOut(settings, copystream)
@ -870,7 +870,7 @@ def SendQuery(settings, query, copy_in, copy_out, copystream)
handleCopyOut(settings, STDOUT)
end
when PGresult::COPY_IN
when PG::COPY_IN
success = TRUE
if copy_in
handleCopyIn(settings, FALSE, copystream)
@ -879,7 +879,7 @@ def SendQuery(settings, query, copy_in, copy_out, copystream)
end
end
if (settings.db.status == PGconn::CONNECTION_BAD)
if (settings.db.status == PG::CONNECTION_BAD)
printf(STDERR, "We have lost the connection to the backend, so ")
printf(STDERR, "further processing is impossible. ")
printf(STDERR, "Terminating.\n")
@ -1129,10 +1129,10 @@ def main
dbname = "template1"
end
settings.db = PGconn.connect(host, port, "", "", dbname);
settings.db = PG.connect(host, port, "", "", dbname);
dbname = settings.db.db
if settings.db.status() == PGconn::CONNECTION_BAD
if settings.db.status() == PG::CONNECTION_BAD
printf(STDERR, "Connection to database '%s' failed.\n", dbname)
printf(STDERR, "%s", settings.db.error)
exit(1)

View File

@ -11,7 +11,7 @@ def main
pgtty = nil
dbname = "template1"
begin
conn = PGconn.connect(pghost,pgport,pgoptions,pgtty,dbname)
conn = PG.connect(pghost,pgport,pgoptions,pgtty,dbname)
if $DEBUG
fd = open("/tmp/trace.out","w")
conn.trace(fd)
@ -22,7 +22,7 @@ def main
res.clear
res = conn.exec("FETCH ALL in myportal")
if (res.result_status != PGresult::PGRES_TUPLES_OK)
if (res.result_status != PG::PGRES_TUPLES_OK)
raise PGerror,"FETCH ALL command didn't return tuples properly\n"
end
@ -46,7 +46,7 @@ def main
fl.close
end
rescue PGError
if (conn.status == PGconn::CONNECTION_BAD)
if (conn.status == PG::CONNECTION_BAD)
printf(STDERR, "We have lost the connection to the backend, so ")
printf(STDERR, "further processing is impossible. ")
printf(STDERR, "Terminating.\n")

View File

@ -20,7 +20,7 @@ def main
pgtty = nil
dbname = ENV['USER']
begin
conn = PGconn.connect(pghost,pgport,pgoptions,pgtty,dbname)
conn = PG.connect(pghost,pgport,pgoptions,pgtty,dbname)
rescue PGError
printf(STDERR, "Connection to database '%s' failed.\n",dbname)
exit(2)

View File

@ -20,8 +20,8 @@ def main
pgtty = nil
begin
conn1 = PGconn.connect(pghost,pgport,pgoptions,pgtty,dbname1)
conn2 = PGconn.connect(pghost,pgport,pgoptions,pgtty,dbname2)
conn1 = PG.connect(pghost,pgport,pgoptions,pgtty,dbname1)
conn2 = PG.connect(pghost,pgport,pgoptions,pgtty,dbname2)
rescue PGError
printf(STDERR,"connection to database.\n")
exit(1)
@ -33,7 +33,7 @@ def main
res1.clear
res1 = conn1.exec("FETCH ALL in myportal")
if (res1.status != PGresult::TUPLES_OK)
if (res1.status != PG::TUPLES_OK)
raise PGerror,"FETCH ALL command didn't return tuples properly\n"
end
@ -54,7 +54,7 @@ def main
conn1.close
rescue PGError
if (conn1.status == PGconn::CONNECTION_BAD)
if (conn1.status == PG::CONNECTION_BAD)
printf(STDERR, "We have lost the connection to the backend, so ")
printf(STDERR, "further processing is impossible. ")
printf(STDERR, "Terminating.\n")

View File

@ -4,7 +4,7 @@ require 'pg'
connhash = { :dbname => 'test' }
db = PGconn.connect( connhash )
db = PG.connect( connhash )
db.exec "DROP TABLE IF EXISTS test"
db.exec "CREATE TABLE test (a INTEGER, b BYTEA)"

View File

@ -3,17 +3,11 @@
require 'pathname'
require 'rspec'
require 'shellwords'
require 'pg'
TEST_DIRECTORY = Pathname.getwd + "tmp_test_specs"
RSpec.configure do |config|
ruby_version_vec = RUBY_VERSION.split('.').map {|c| c.to_i }.pack( "N*" )
config.mock_with :rspec
config.filter_run_excluding :ruby_19 => true if ruby_version_vec <= [1,9,1].pack( "N*" )
end
module PgTestingHelpers
module PG::TestingHelpers
# Set some ANSI escape code constants (Shamelessly stolen from Perl's
@ -221,7 +215,7 @@ module PgTestingHelpers
fail
end
conn = PGconn.connect( @conninfo )
conn = PG.connect( @conninfo )
conn.set_notice_processor do |message|
$stderr.puts( message ) if $DEBUG
end
@ -238,3 +232,12 @@ module PgTestingHelpers
end
RSpec.configure do |config|
ruby_version_vec = RUBY_VERSION.split('.').map {|c| c.to_i }.pack( "N*" )
config.include( PG::TestingHelpers )
config.mock_with :rspec
config.filter_run_excluding :ruby_19 => true if ruby_version_vec <= [1,9,1].pack( "N*" )
end
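With the helpers now mixed in through RSpec.configure, an individual spec file no longer needs an explicit include; a hypothetical skeleton under that assumption (the real specs set up the load path in a BEGIN block):

#!/usr/bin/env rspec
$LOAD_PATH.unshift( Dir.pwd )   # stand-in for the specs' usual BEGIN { ... } path setup

require 'rspec'
require 'spec/lib/helpers'
require 'pg'

describe PG::Connection do
    before( :all ) { @conn = setup_testing_db( "PG_Connection" ) }
    after( :all )  { teardown_testing_db( @conn ) if @conn }

    it "connects successfully with a connection string" do
        tmpconn = described_class.connect( @conninfo )
        tmpconn.status.should == PG::CONNECTION_OK
        tmpconn.finish
    end
end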

View File

@ -1,170 +0,0 @@
#!/usr/bin/env rspec
# encoding: utf-8
BEGIN {
require 'pathname'
require 'rbconfig'
basedir = Pathname( __FILE__ ).dirname.parent
libdir = basedir + 'lib'
archlib = libdir + RbConfig::CONFIG['sitearch']
$LOAD_PATH.unshift( basedir.to_s ) unless $LOAD_PATH.include?( basedir.to_s )
$LOAD_PATH.unshift( libdir.to_s ) unless $LOAD_PATH.include?( libdir.to_s )
$LOAD_PATH.unshift( archlib.to_s ) unless $LOAD_PATH.include?( archlib.to_s )
}
require 'rspec'
require 'spec/lib/helpers'
require 'pg'
describe "multinationalization support", :ruby_19 => true do
include PgTestingHelpers
before( :all ) do
@conn = setup_testing_db( "m17n" )
@conn.exec( 'BEGIN' )
end
after( :each ) do
@conn.exec( 'ROLLBACK' ) if @conn
end
after( :all ) do
teardown_testing_db( @conn ) if @conn
end
#
# Examples
#
it "should return the same bytes in text format that are sent as inline text" do
binary_file = File.join(Dir.pwd, 'spec/data', 'random_binary_data')
in_bytes = File.open(binary_file, 'r:ASCII-8BIT').read
out_bytes = nil
@conn.transaction do |conn|
conn.exec("SET standard_conforming_strings=on")
res = conn.exec("VALUES ('#{PGconn.escape_bytea(in_bytes)}'::bytea)", [], 0)
out_bytes = PGconn.unescape_bytea(res[0]['column1'])
end
out_bytes.should == in_bytes
end
describe "rubyforge #22925: m17n support" do
it "should return results in the same encoding as the client (iso-8859-1)" do
out_string = nil
@conn.transaction do |conn|
conn.internal_encoding = 'iso8859-1'
res = conn.exec("VALUES ('fantasia')", [], 0)
out_string = res[0]['column1']
end
out_string.should == 'fantasia'
out_string.encoding.should == Encoding::ISO8859_1
end
it "should return results in the same encoding as the client (utf-8)" do
out_string = nil
@conn.transaction do |conn|
conn.internal_encoding = 'utf-8'
res = conn.exec("VALUES ('世界線航跡蔵')", [], 0)
out_string = res[0]['column1']
end
out_string.should == '世界線航跡蔵'
out_string.encoding.should == Encoding::UTF_8
end
it "should return results in the same encoding as the client (EUC-JP)" do
out_string = nil
@conn.transaction do |conn|
conn.internal_encoding = 'EUC-JP'
stmt = "VALUES ('世界線航跡蔵')".encode('EUC-JP')
res = conn.exec(stmt, [], 0)
out_string = res[0]['column1']
end
out_string.should == '世界線航跡蔵'.encode('EUC-JP')
out_string.encoding.should == Encoding::EUC_JP
end
it "returns the results in the correct encoding even if the client_encoding has " +
"changed since the results were fetched" do
out_string = nil
@conn.transaction do |conn|
conn.internal_encoding = 'EUC-JP'
stmt = "VALUES ('世界線航跡蔵')".encode('EUC-JP')
res = conn.exec(stmt, [], 0)
conn.internal_encoding = 'utf-8'
out_string = res[0]['column1']
end
out_string.should == '世界線航跡蔵'.encode('EUC-JP')
out_string.encoding.should == Encoding::EUC_JP
end
it "the connection should return ASCII-8BIT when the server encoding is SQL_ASCII" do
@conn.external_encoding.should == Encoding::ASCII_8BIT
end
it "works around the unsupported JOHAB encoding by returning stuff in 'ASCII_8BIT'" do
pending "figuring out how to create a string in the JOHAB encoding" do
out_string = nil
@conn.transaction do |conn|
conn.exec( "set client_encoding = 'JOHAB';" )
stmt = "VALUES ('foo')".encode('JOHAB')
res = conn.exec( stmt, [], 0 )
out_string = res[0]['column1']
end
out_string.should == 'foo'.encode( Encoding::ASCII_8BIT )
out_string.encoding.should == Encoding::ASCII_8BIT
end
end
it "uses the client encoding for escaped string" do
original = "string to escape".force_encoding( "euc-jp" )
@conn.set_client_encoding( "euc_jp" )
escaped = @conn.escape( original )
escaped.encoding.should == Encoding::EUC_JP
end
end
describe "Ruby 1.9.x default_internal encoding" do
it "honors the Encoding.default_internal if it's set and the synchronous interface is used" do
@conn.transaction do |txn_conn|
txn_conn.internal_encoding = Encoding::ISO8859_1
txn_conn.exec( "CREATE TABLE defaultinternaltest ( foo text )" )
txn_conn.exec( "INSERT INTO defaultinternaltest VALUES ('Grün und Weiß')" )
end
begin
prev_encoding = Encoding.default_internal
Encoding.default_internal = Encoding::UTF_8
conn = PGconn.connect( @conninfo )
conn.internal_encoding.should == Encoding::UTF_8
res = conn.exec( "SELECT foo FROM defaultinternaltest" )
res[0]['foo'].encoding.should == Encoding::UTF_8
ensure
conn.finish if conn
Encoding.default_internal = prev_encoding
end
end
end
it "encodes exception messages with the connection's encoding (#96)" do
@conn.set_client_encoding( 'utf-8' )
@conn.exec "CREATE TABLE foo (bar TEXT)"
begin
@conn.exec "INSERT INTO foo VALUES ('Côte d'Ivoire')"
rescue => err
err.message.encoding.should == Encoding::UTF_8
else
fail "No exception raised?!"
end
end
end

View File

@ -1,30 +1,25 @@
#!/usr/bin/env rspec
# encoding: utf-8
#encoding: utf-8
BEGIN {
require 'pathname'
require 'rbconfig'
basedir = Pathname( __FILE__ ).dirname.parent
basedir = Pathname( __FILE__ ).dirname.parent.parent
libdir = basedir + 'lib'
archlib = libdir + Config::CONFIG['sitearch']
$LOAD_PATH.unshift( basedir.to_s ) unless $LOAD_PATH.include?( basedir.to_s )
$LOAD_PATH.unshift( libdir.to_s ) unless $LOAD_PATH.include?( libdir.to_s )
$LOAD_PATH.unshift( archlib.to_s ) unless $LOAD_PATH.include?( archlib.to_s )
}
require 'rspec'
require 'spec/lib/helpers'
require 'pg'
require 'timeout'
require 'pg'
describe PGconn do
include PgTestingHelpers
describe PG::Connection do
before( :all ) do
@conn = setup_testing_db( "PGconn" )
@conn = setup_testing_db( "PG_Connection" )
end
before( :each ) do
@ -45,7 +40,7 @@ describe PGconn do
#
it "can create a connection option string from a Hash of options" do
optstring = PGconn.parse_connect_args(
optstring = described_class.parse_connect_args(
:host => 'pgsql.example.com',
:dbname => 'db01',
'sslmode' => 'require'
@ -58,7 +53,7 @@ describe PGconn do
end
it "can create a connection option string from positional parameters" do
optstring = PGconn.parse_connect_args( 'pgsql.example.com', nil, '-c geqo=off', nil,
optstring = described_class.parse_connect_args( 'pgsql.example.com', nil, '-c geqo=off', nil,
'sales' )
optstring.should be_a( String )
@ -71,7 +66,7 @@ describe PGconn do
end
it "can create a connection option string from a mix of positional and hash parameters" do
optstring = PGconn.parse_connect_args( 'pgsql.example.com',
optstring = described_class.parse_connect_args( 'pgsql.example.com',
:dbname => 'licensing', :user => 'jrandom' )
optstring.should be_a( String )
@ -81,64 +76,94 @@ describe PGconn do
end
it "escapes single quotes and backslashes in connection parameters" do
PGconn.parse_connect_args( "DB 'browser' \\" ).should == "host='DB \\'browser\\' \\\\'"
described_class.parse_connect_args( "DB 'browser' \\" ).should == "host='DB \\'browser\\' \\\\'"
end
it "connects with defaults if no connection parameters are given" do
PGconn.parse_connect_args.should == ''
described_class.parse_connect_args.should == ''
end
it "connects successfully with connection string" do
tmpconn = PGconn.connect(@conninfo)
tmpconn.status.should== PGconn::CONNECTION_OK
tmpconn = described_class.connect(@conninfo)
tmpconn.status.should== PG::CONNECTION_OK
tmpconn.finish
end
it "connects using 7 arguments converted to strings" do
tmpconn = PGconn.connect('localhost', @port, nil, nil, :test, nil, nil)
tmpconn.status.should== PGconn::CONNECTION_OK
tmpconn = described_class.connect('localhost', @port, nil, nil, :test, nil, nil)
tmpconn.status.should== PG::CONNECTION_OK
tmpconn.finish
end
it "connects using a hash of connection parameters" do
tmpconn = PGconn.connect(
tmpconn = described_class.connect(
:host => 'localhost',
:port => @port,
:dbname => :test)
tmpconn.status.should== PGconn::CONNECTION_OK
tmpconn.status.should== PG::CONNECTION_OK
tmpconn.finish
end
it "raises an exception when connecting with an invalid number of arguments" do
expect {
PGconn.connect( 1, 2, 3, 4, 5, 6, 7, 'extra' )
described_class.connect( 1, 2, 3, 4, 5, 6, 7, 'extra' )
}.to raise_error( ArgumentError, /extra positional parameter/i )
end
it "can connect asynchronously" do
tmpconn = PGconn.connect_start(@conninfo)
socket = IO.for_fd(tmpconn.socket)
tmpconn = described_class.connect_start( @conninfo )
tmpconn.should be_a( described_class )
socket = IO.for_fd( tmpconn.socket )
status = tmpconn.connect_poll
while(status != PGconn::PGRES_POLLING_OK) do
if(status == PGconn::PGRES_POLLING_READING)
if(not select([socket],[],[],5.0))
while status != PG::PGRES_POLLING_OK
if status == PG::PGRES_POLLING_READING
select( [socket], [], [], 5.0 ) or
raise "Asynchronous connection timed out!"
end
elsif(status == PGconn::PGRES_POLLING_WRITING)
if(not select([],[socket],[],5.0))
elsif status == PG::PGRES_POLLING_WRITING
select( [], [socket], [], 5.0 ) or
raise "Asynchronous connection timed out!"
end
end
status = tmpconn.connect_poll
end
tmpconn.status.should== PGconn::CONNECTION_OK
tmpconn.status.should == PG::CONNECTION_OK
tmpconn.finish
end
it "can connect asynchronously for the duration of a block" do
conn = nil
described_class.connect_start(@conninfo) do |tmpconn|
tmpconn.should be_a( described_class )
conn = tmpconn
socket = IO.for_fd(tmpconn.socket)
status = tmpconn.connect_poll
while status != PG::PGRES_POLLING_OK
if status == PG::PGRES_POLLING_READING
if(not select([socket],[],[],5.0))
raise "Asynchronous connection timed out!"
end
elsif(status == PG::PGRES_POLLING_WRITING)
if(not select([],[socket],[],5.0))
raise "Asynchronous connection timed out!"
end
end
status = tmpconn.connect_poll
end
tmpconn.status.should == PG::CONNECTION_OK
end
conn.should be_finished()
end
it "doesn't leave stale server connections after finish" do
PGconn.connect(@conninfo).finish
described_class.connect(@conninfo).finish
sleep 0.5
res = @conn.exec(%[SELECT COUNT(*) AS n FROM pg_stat_activity
WHERE usename IS NOT NULL])
@ -213,13 +238,13 @@ describe PGconn do
@conn.send_query("SELECT pg_sleep(1000)")
@conn.cancel
tmpres = @conn.get_result
if(tmpres.result_status != PGresult::PGRES_TUPLES_OK)
if(tmpres.result_status != PG::PGRES_TUPLES_OK)
error = true
end
error.should == true
end
it "automatically rolls back a transaction started with PGconn#transaction if an exception " +
it "automatically rolls back a transaction started with described_class#transaction if an exception " +
"is raised" do
# abort the per-example transaction so we can test our own
@conn.exec( 'ROLLBACK' )
@ -241,10 +266,10 @@ describe PGconn do
it "not read past the end of a large object" do
@conn.transaction do
oid = @conn.lo_create( 0 )
fd = @conn.lo_open( oid, PGconn::INV_READ|PGconn::INV_WRITE )
fd = @conn.lo_open( oid, PG::INV_READ|PG::INV_WRITE )
@conn.lo_write( fd, "foobar" )
@conn.lo_read( fd, 10 ).should be_nil()
@conn.lo_lseek( fd, 0, PGconn::SEEK_SET )
@conn.lo_lseek( fd, 0, PG::SEEK_SET )
@conn.lo_read( fd, 10 ).should == 'foobar'
end
end
@ -256,7 +281,7 @@ describe PGconn do
pid = fork do
begin
conn = PGconn.connect( @conninfo )
conn = described_class.connect( @conninfo )
sleep 1
conn.exec( 'NOTIFY woo' )
ensure
@ -277,7 +302,7 @@ describe PGconn do
pid = fork do
begin
conn = PGconn.connect( @conninfo )
conn = described_class.connect( @conninfo )
sleep 1
conn.exec( 'NOTIFY woo' )
ensure
@ -304,7 +329,7 @@ describe PGconn do
pid = fork do
begin
conn = PGconn.connect( @conninfo )
conn = described_class.connect( @conninfo )
conn.exec( 'NOTIFY woo' )
conn.exec( 'NOTIFY war' )
conn.exec( 'NOTIFY woz' )
@ -336,7 +361,7 @@ describe PGconn do
pid = fork do
begin
conn = PGconn.connect( @conninfo )
conn = described_class.connect( @conninfo )
conn.exec( 'NOTIFY woo' )
ensure
conn.finish
@ -367,7 +392,7 @@ describe PGconn do
@conn.exec( 'LISTEN knees' )
pid = fork do
conn = PGconn.connect( @conninfo )
conn = described_class.connect( @conninfo )
conn.exec( %Q{NOTIFY knees, 'skirt and boots'} )
conn.finish
exit!
@ -391,7 +416,7 @@ describe PGconn do
@conn.exec( 'LISTEN knees' )
pid = fork do
conn = PGconn.connect( @conninfo )
conn = described_class.connect( @conninfo )
conn.exec( %Q{NOTIFY knees} )
conn.finish
exit!
@ -414,7 +439,7 @@ describe PGconn do
@conn.exec( 'LISTEN knees' )
pid = fork do
conn = PGconn.connect( @conninfo )
conn = described_class.connect( @conninfo )
conn.exec( %Q{NOTIFY knees} )
conn.finish
exit!
@ -438,7 +463,7 @@ describe PGconn do
@conn.exec( 'LISTEN knees' )
pid = fork do
conn = PGconn.connect( @conninfo )
conn = described_class.connect( @conninfo )
conn.exec( %Q{NOTIFY knees, 'skirt and boots'} )
conn.finish
exit!
@ -464,7 +489,7 @@ describe PGconn do
@conn.exec( 'LISTEN knees' )
pid = fork do
conn = PGconn.connect( @conninfo )
conn = described_class.connect( @conninfo )
conn.exec( %Q{NOTIFY knees, 'skirt and boots'} )
conn.finish
exit!
@ -488,7 +513,7 @@ describe PGconn do
@conn.exec( 'LISTEN knees' )
pid = fork do
conn = PGconn.connect( @conninfo )
conn = described_class.connect( @conninfo )
conn.exec( %Q{NOTIFY knees, 'skirt and boots'} )
conn.finish
exit!
@ -512,7 +537,7 @@ describe PGconn do
it "yields the result if block is given to exec" do
rval = @conn.exec( "select 1234::int as a union select 5678::int as a" ) do |result|
values = []
result.should be_kind_of( PGresult )
result.should be_kind_of( PG::Result )
result.ntuples.should == 2
result.each do |tuple|
values << tuple['a']
@ -543,7 +568,7 @@ describe PGconn do
end
it "PGconn#block shouldn't block a second thread" do
it "described_class#block shouldn't block a second thread" do
t = Thread.new do
@conn.send_query( "select pg_sleep(3)" )
@conn.block
@ -556,7 +581,7 @@ describe PGconn do
t.join
end
it "PGconn#block should allow a timeout" do
it "described_class#block should allow a timeout" do
@conn.send_query( "select pg_sleep(3)" )
start = Time.now
@ -568,7 +593,7 @@ describe PGconn do
it "can encrypt a string given a password and username" do
PGconn.encrypt_password("postgres", "postgres").
described_class.encrypt_password("postgres", "postgres").
should =~ /\S+/
end
@ -576,13 +601,13 @@ describe PGconn do
it "raises an appropriate error if either of the required arguments for encrypt_password " +
"is not valid" do
expect {
PGconn.encrypt_password( nil, nil )
described_class.encrypt_password( nil, nil )
}.to raise_error( TypeError )
expect {
PGconn.encrypt_password( "postgres", nil )
described_class.encrypt_password( "postgres", nil )
}.to raise_error( TypeError )
expect {
PGconn.encrypt_password( nil, "postgres" )
described_class.encrypt_password( nil, "postgres" )
}.to raise_error( TypeError )
end
@ -627,14 +652,14 @@ describe PGconn do
it "can connect asynchronously" do
serv = TCPServer.new( '127.0.0.1', 54320 )
conn = PGconn.connect_start( '127.0.0.1', 54320, "", "", "me", "xxxx", "somedb" )
conn.connect_poll.should == PGconn::PGRES_POLLING_WRITING
conn = described_class.connect_start( '127.0.0.1', 54320, "", "", "me", "xxxx", "somedb" )
conn.connect_poll.should == PG::PGRES_POLLING_WRITING
select( nil, [IO.for_fd(conn.socket)], nil, 0.2 )
serv.close
if conn.connect_poll == PGconn::PGRES_POLLING_READING
if conn.connect_poll == PG::PGRES_POLLING_READING
select( [IO.for_fd(conn.socket)], nil, nil, 0.2 )
end
conn.connect_poll.should == PGconn::PGRES_POLLING_FAILED
conn.connect_poll.should == PG::PGRES_POLLING_FAILED
end
it "discards previous results (if any) before waiting on an #async_exec"
@ -647,4 +672,124 @@ describe PGconn do
result.should == { 'one' => '47' }
end
describe "multinationalization support", :ruby_19 => true do
it "should return the same bytes in text format that are sent as inline text" do
binary_file = File.join(Dir.pwd, 'spec/data', 'random_binary_data')
in_bytes = File.open(binary_file, 'r:ASCII-8BIT').read
escaped_bytes = described_class.escape_bytea( in_bytes )
out_bytes = nil
@conn.transaction do |conn|
conn.exec("SET standard_conforming_strings=on")
res = conn.exec("VALUES ('#{escaped_bytes}'::bytea)", [], 0)
out_bytes = described_class.unescape_bytea( res[0]['column1'] )
end
out_bytes.should == in_bytes
end
describe "rubyforge #22925: m17n support" do
it "should return results in the same encoding as the client (iso-8859-1)" do
out_string = nil
@conn.transaction do |conn|
conn.internal_encoding = 'iso8859-1'
res = conn.exec("VALUES ('fantasia')", [], 0)
out_string = res[0]['column1']
end
out_string.should == 'fantasia'
out_string.encoding.should == Encoding::ISO8859_1
end
it "should return results in the same encoding as the client (utf-8)" do
out_string = nil
@conn.transaction do |conn|
conn.internal_encoding = 'utf-8'
res = conn.exec("VALUES ('世界線航跡蔵')", [], 0)
out_string = res[0]['column1']
end
out_string.should == '世界線航跡蔵'
out_string.encoding.should == Encoding::UTF_8
end
it "should return results in the same encoding as the client (EUC-JP)" do
out_string = nil
@conn.transaction do |conn|
conn.internal_encoding = 'EUC-JP'
stmt = "VALUES ('世界線航跡蔵')".encode('EUC-JP')
res = conn.exec(stmt, [], 0)
out_string = res[0]['column1']
end
out_string.should == '世界線航跡蔵'.encode('EUC-JP')
out_string.encoding.should == Encoding::EUC_JP
end
it "returns the results in the correct encoding even if the client_encoding has " +
"changed since the results were fetched" do
out_string = nil
@conn.transaction do |conn|
conn.internal_encoding = 'EUC-JP'
stmt = "VALUES ('世界線航跡蔵')".encode('EUC-JP')
res = conn.exec(stmt, [], 0)
conn.internal_encoding = 'utf-8'
out_string = res[0]['column1']
end
out_string.should == '世界線航跡蔵'.encode('EUC-JP')
out_string.encoding.should == Encoding::EUC_JP
end
it "the connection should return ASCII-8BIT when the server encoding is SQL_ASCII" do
@conn.external_encoding.should == Encoding::ASCII_8BIT
end
it "works around the unsupported JOHAB encoding by returning stuff in 'ASCII_8BIT'" do
pending "figuring out how to create a string in the JOHAB encoding" do
out_string = nil
@conn.transaction do |conn|
conn.exec( "set client_encoding = 'JOHAB';" )
stmt = "VALUES ('foo')".encode('JOHAB')
res = conn.exec( stmt, [], 0 )
out_string = res[0]['column1']
end
out_string.should == 'foo'.encode( Encoding::ASCII_8BIT )
out_string.encoding.should == Encoding::ASCII_8BIT
end
end
it "uses the client encoding for escaped string" do
original = "string to escape".force_encoding( "euc-jp" )
@conn.set_client_encoding( "euc_jp" )
escaped = @conn.escape( original )
escaped.encoding.should == Encoding::EUC_JP
end
end
describe "Ruby 1.9.x default_internal encoding" do
it "honors the Encoding.default_internal if it's set and the synchronous interface is used" do
@conn.transaction do |txn_conn|
txn_conn.internal_encoding = Encoding::ISO8859_1
txn_conn.exec( "CREATE TABLE defaultinternaltest ( foo text )" )
txn_conn.exec( "INSERT INTO defaultinternaltest VALUES ('Grün und Weiß')" )
end
begin
prev_encoding = Encoding.default_internal
Encoding.default_internal = Encoding::UTF_8
conn = described_class.connect( @conninfo )
conn.internal_encoding.should == Encoding::UTF_8
res = conn.exec( "SELECT foo FROM defaultinternaltest" )
res[0]['foo'].encoding.should == Encoding::UTF_8
ensure
conn.finish if conn
Encoding.default_internal = prev_encoding
end
end
end
end
end

View File

@ -3,26 +3,22 @@
BEGIN {
require 'pathname'
require 'rbconfig'
basedir = Pathname( __FILE__ ).dirname.parent
basedir = Pathname( __FILE__ ).dirname.parent.parent
libdir = basedir + 'lib'
archlib = libdir + Config::CONFIG['sitearch']
$LOAD_PATH.unshift( basedir.to_s ) unless $LOAD_PATH.include?( basedir.to_s )
$LOAD_PATH.unshift( libdir.to_s ) unless $LOAD_PATH.include?( libdir.to_s )
$LOAD_PATH.unshift( archlib.to_s ) unless $LOAD_PATH.include?( archlib.to_s )
}
require 'rspec'
require 'spec/lib/helpers'
require 'pg'
describe PGresult do
include PgTestingHelpers
describe PG::Result do
before( :all ) do
@conn = setup_testing_db( "PGresult" )
@conn = setup_testing_db( "PG_Result" )
end
before( :each ) do
@ -63,20 +59,20 @@ describe PGresult do
result = exception.result
result.should be_a( PGresult )
result.error_field( PGresult::PG_DIAG_SEVERITY ).should == 'ERROR'
result.error_field( PGresult::PG_DIAG_SQLSTATE ).should == '42P01'
result.error_field( PGresult::PG_DIAG_MESSAGE_PRIMARY ).
result.should be_a( described_class() )
result.error_field( PG::PG_DIAG_SEVERITY ).should == 'ERROR'
result.error_field( PG::PG_DIAG_SQLSTATE ).should == '42P01'
result.error_field( PG::PG_DIAG_MESSAGE_PRIMARY ).
should == 'relation "nonexistant_table" does not exist'
result.error_field( PGresult::PG_DIAG_MESSAGE_DETAIL ).should be_nil()
result.error_field( PGresult::PG_DIAG_MESSAGE_HINT ).should be_nil()
result.error_field( PGresult::PG_DIAG_STATEMENT_POSITION ).should == '15'
result.error_field( PGresult::PG_DIAG_INTERNAL_POSITION ).should be_nil()
result.error_field( PGresult::PG_DIAG_INTERNAL_QUERY ).should be_nil()
result.error_field( PGresult::PG_DIAG_CONTEXT ).should be_nil()
result.error_field( PGresult::PG_DIAG_SOURCE_FILE ).should =~ /parse_relation\.c$/
result.error_field( PGresult::PG_DIAG_SOURCE_LINE ).should == '857'
result.error_field( PGresult::PG_DIAG_SOURCE_FUNCTION ).should == 'parserOpenTable'
result.error_field( PG::PG_DIAG_MESSAGE_DETAIL ).should be_nil()
result.error_field( PG::PG_DIAG_MESSAGE_HINT ).should be_nil()
result.error_field( PG::PG_DIAG_STATEMENT_POSITION ).should == '15'
result.error_field( PG::PG_DIAG_INTERNAL_POSITION ).should be_nil()
result.error_field( PG::PG_DIAG_INTERNAL_QUERY ).should be_nil()
result.error_field( PG::PG_DIAG_CONTEXT ).should be_nil()
result.error_field( PG::PG_DIAG_SOURCE_FILE ).should =~ /parse_relation\.c$/
result.error_field( PG::PG_DIAG_SOURCE_LINE ).should == '857'
result.error_field( PG::PG_DIAG_SOURCE_FUNCTION ).should == 'parserOpenTable'
end
@ -85,7 +81,7 @@ describe PGresult do
begin
res = @conn.exec("SELECT 1/0")
rescue PGError => e
sqlstate = e.result.result_error_field( PGresult::PG_DIAG_SQLSTATE ).to_i
sqlstate = e.result.result_error_field( PG::PG_DIAG_SQLSTATE ).to_i
end
sqlstate.should == 22012
end
@ -103,7 +99,7 @@ describe PGresult do
in_bytes = File.open(binary_file, 'rb').read
out_bytes = nil
@conn.exec("SET standard_conforming_strings=on")
res = @conn.exec("VALUES ('#{PGconn.escape_bytea(in_bytes)}'::bytea)", [], 1)
res = @conn.exec("VALUES ('#{PG::Connection.escape_bytea(in_bytes)}'::bytea)", [], 1)
out_bytes = res[0]['column1']
out_bytes.should == in_bytes
end
@ -113,7 +109,7 @@ describe PGresult do
bytes = File.open(binary_file, 'rb').read
res = @conn.exec('VALUES ($1::bytea)',
[ { :value => bytes, :format => 1 } ])
PGconn.unescape_bytea(res[0]['column1']).should== bytes
PG::Connection.unescape_bytea(res[0]['column1']).should== bytes
end
it "should return the same bytes in text format that are sent as inline text" do
@ -122,8 +118,8 @@ describe PGresult do
out_bytes = nil
@conn.exec("SET standard_conforming_strings=on")
res = @conn.exec("VALUES ('#{PGconn.escape_bytea(in_bytes)}'::bytea)", [], 0)
out_bytes = PGconn.unescape_bytea(res[0]['column1'])
res = @conn.exec("VALUES ('#{PG::Connection.escape_bytea(in_bytes)}'::bytea)", [], 0)
out_bytes = PG::Connection.unescape_bytea(res[0]['column1'])
out_bytes.should == in_bytes
end
@ -175,19 +171,19 @@ describe PGresult do
res.fmod( 0 ).should == 33 + 4 # Column length + varlena size (4)
end
it "should raise an exception when an invalid index is passed to PGresult#fmod" do
it "should raise an exception when an invalid index is passed to PG::Result#fmod" do
@conn.exec( 'CREATE TABLE fmodtest ( foo varchar(33) )' )
res = @conn.exec( 'SELECT * FROM fmodtest' )
expect { res.fmod(1) }.to raise_error( ArgumentError )
end
it "should raise an exception when an invalid (negative) index is passed to PGresult#fmod" do
it "should raise an exception when an invalid (negative) index is passed to PG::Result#fmod" do
@conn.exec( 'CREATE TABLE fmodtest ( foo varchar(33) )' )
res = @conn.exec( 'SELECT * FROM fmodtest' )
expect { res.fmod(-11) }.to raise_error( ArgumentError )
end
it "shouldn't raise an exception when a valid index is passed to PGresult#fmod for a column with no typemod" do
it "shouldn't raise an exception when a valid index is passed to PG::Result#fmod for a column with no typemod" do
@conn.exec( 'CREATE TABLE fmodtest ( foo text )' )
res = @conn.exec( 'SELECT * FROM fmodtest' )
res.fmod( 0 ).should == -1 # and it shouldn't raise an exception, either
@ -201,25 +197,25 @@ describe PGresult do
res.ftable( 0 ).should be_nonzero()
end
it "should raise an exception when an invalid index is passed to PGresult#ftable" do
it "should raise an exception when an invalid index is passed to PG::Result#ftable" do
@conn.exec( 'CREATE TABLE ftabletest ( foo text )' )
res = @conn.exec( 'SELECT * FROM ftabletest' )
expect { res.ftable(18) }.to raise_error( ArgumentError )
end
it "should raise an exception when an invalid (negative) index is passed to PGresult#ftable" do
it "should raise an exception when an invalid (negative) index is passed to PG::Result#ftable" do
@conn.exec( 'CREATE TABLE ftabletest ( foo text )' )
res = @conn.exec( 'SELECT * FROM ftabletest' )
expect { res.ftable(-2) }.to raise_error( ArgumentError )
end
it "shouldn't raise an exception when a valid index is passed to PGresult#ftable for a " +
it "shouldn't raise an exception when a valid index is passed to PG::Result#ftable for a " +
"column with no corresponding table" do
@conn.exec( 'CREATE TABLE ftabletest ( foo text )' )
res = @conn.exec( 'SELECT foo, LENGTH(foo) as length FROM ftabletest' )
res.ftable( 1 ).should == PGresult::InvalidOid # and it shouldn't raise an exception, either
res.ftable( 1 ).should == PG::INVALID_OID # and it shouldn't raise an exception, either
end
# PQftablecol
@ -231,21 +227,21 @@ describe PGresult do
res.ftablecol( 1 ).should == 2
end
it "should raise an exception when an invalid index is passed to PGresult#ftablecol" do
it "should raise an exception when an invalid index is passed to PG::Result#ftablecol" do
@conn.exec( 'CREATE TABLE ftablecoltest ( foo text, bar numeric )' )
res = @conn.exec( 'SELECT * FROM ftablecoltest' )
expect { res.ftablecol(32) }.to raise_error( ArgumentError )
end
it "should raise an exception when an invalid (negative) index is passed to PGresult#ftablecol" do
it "should raise an exception when an invalid (negative) index is passed to PG::Result#ftablecol" do
@conn.exec( 'CREATE TABLE ftablecoltest ( foo text, bar numeric )' )
res = @conn.exec( 'SELECT * FROM ftablecoltest' )
expect { res.ftablecol(-1) }.to raise_error( ArgumentError )
end
it "shouldn't raise an exception when a valid index is passed to PGresult#ftablecol for a " +
it "shouldn't raise an exception when a valid index is passed to PG::Result#ftablecol for a " +
"column with no corresponding table" do
@conn.exec( 'CREATE TABLE ftablecoltest ( foo text )' )
res = @conn.exec( 'SELECT foo, LENGTH(foo) as length FROM ftablecoltest' )

22
spec/pg_spec.rb Normal file
View File

@ -0,0 +1,22 @@
#!/usr/bin/env rspec
# encoding: utf-8
BEGIN {
require 'pathname'
basedir = Pathname( __FILE__ ).dirname.parent
libdir = basedir + 'lib'
$LOAD_PATH.unshift( basedir.to_s ) unless $LOAD_PATH.include?( basedir.to_s )
$LOAD_PATH.unshift( libdir.to_s ) unless $LOAD_PATH.include?( libdir.to_s )
}
require 'rspec'
require 'spec/lib/helpers'
require 'pg'
describe PG do
end