AnyMongo
view release on metacpan or search on metacpan
AnyMongo.xs view on Meta::CPAN
int start = buf.pos-buf.start;
SV **obj = av_fetch(a, i, 0);
perl_mongo_sv_to_bson(&buf, *obj, ids);
if (buf.pos - (buf.start + start) > MAX_OBJ_SIZE) {
croak("insert is larger than 4 MB: %d bytes", buf.pos - (buf.start + start));
}
}
perl_mongo_serialize_size(buf.start, &buf);
XPUSHs(sv_2mortal(newSVpvn(buf.start, buf.pos-buf.start)));
XPUSHs(sv_2mortal(newRV_noinc((SV*)ids)));
Safefree(buf.start);
/*
 * build_remove_message(request_id, ns, criteria, flags)
 * Serialize a MongoDB OP_DELETE wire-protocol message for namespace `ns`,
 * targeting documents matching the BSON-encoded `criteria`, with the given
 * delete `flags`. Returns the raw message bytes as a Perl scalar.
 * NOTE(review): request_id is accepted but not visibly used in this body;
 * presumably CREATE_HEADER supplies its own request id -- confirm.
 */
SV*
build_remove_message(request_id,ns, criteria, flags)
SV *request_id;
char *ns
SV *criteria
int flags
PREINIT:
buffer buf;
mongo_msg_header header;
CODE:
CREATE_BUF(INITIAL_BUF_SIZE); /* allocate the growable output buffer */
CREATE_HEADER(buf, ns, OP_DELETE); /* standard header + namespace for OP_DELETE */
perl_mongo_serialize_int(&buf, flags); /* delete flags (e.g. SingleRemove) */
perl_mongo_sv_to_bson(&buf, criteria, NO_PREP); /* selector document as BSON */
perl_mongo_serialize_size(buf.start, &buf); /* back-patch total length at offset 0 */
RETVAL = newSVpvn(buf.start, buf.pos-buf.start); /* copy bytes into a new SV */
Safefree(buf.start); /* buffer contents were copied; release it */
OUTPUT:
RETVAL
/*
 * build_update_message(request_id, ns, criteria, obj, flags)
 * Serialize a MongoDB OP_UPDATE wire-protocol message for namespace `ns`:
 * documents matching BSON `criteria` are updated with BSON `obj`, subject
 * to update `flags` (e.g. Upsert, MultiUpdate). Returns the raw message
 * bytes as a Perl scalar.
 * NOTE(review): request_id is accepted but not visibly used in this body;
 * presumably CREATE_HEADER supplies its own request id -- confirm.
 */
SV*
build_update_message(request_id,ns, criteria, obj, flags)
SV *request_id;
char *ns
SV *criteria
SV *obj
int flags
PREINIT:
buffer buf;
mongo_msg_header header;
CODE:
CREATE_BUF(INITIAL_BUF_SIZE); /* allocate the growable output buffer */
CREATE_HEADER(buf, ns, OP_UPDATE); /* standard header + namespace for OP_UPDATE */
perl_mongo_serialize_int(&buf, flags); /* update flags */
perl_mongo_sv_to_bson(&buf, criteria, NO_PREP); /* selector document as BSON */
perl_mongo_sv_to_bson(&buf, obj, NO_PREP); /* update document as BSON */
perl_mongo_serialize_size(buf.start, &buf); /* back-patch total length at offset 0 */
RETVAL = newSVpvn(buf.start, buf.pos-buf.start); /* copy bytes into a new SV */
Safefree(buf.start); /* buffer contents were copied; release it */
OUTPUT:
RETVAL
/*
 * build_get_more_message(request_id, ns, cursor_id, size)
 * Serialize a MongoDB OP_GET_MORE wire-protocol message requesting up to
 * `size` more documents from cursor `cursor_id` on namespace `ns`, with
 * the caller-chosen `request_id`. Returns the raw message bytes as a
 * Perl scalar.
 */
SV*
build_get_more_message(request_id,ns, cursor_id,size)
SV *request_id
char *ns
SV *cursor_id
int size
PREINIT:
buffer buf;
mongo_msg_header header;
int buf_len;
CODE:
/* Exact message length: 16-byte header + 4 (ZERO) + strlen(ns) + NUL
 * + 4 (numberToReturn) + 8 (cursorID) = 33 + strlen(ns); 34 keeps one
 * spare byte, matching the original sizing. */
buf_len = 34+strlen(ns);
New(0, buf.start, buf_len, char);
buf.pos = buf.start;
buf.end = buf.start + buf_len;
CREATE_RESPONSE_HEADER(buf, ns, SvIV(request_id), OP_GET_MORE);
/* BUG FIX: the original overwrote `size` with the buffer length before
 * this point, so the wire-protocol numberToReturn field contained the
 * buffer size instead of the caller's batch size. Serialize the real
 * batch size here. */
perl_mongo_serialize_int(&buf, size);
perl_mongo_serialize_long(&buf, (int64_t)SvIV(cursor_id));
perl_mongo_serialize_size(buf.start, &buf); /* back-patch total length at offset 0 */
RETVAL = newSVpvn(buf.start, buf.pos-buf.start); /* copy bytes into a new SV */
Safefree(buf.start); /* buffer contents were copied; release it */
OUTPUT:
RETVAL
/*
 * build_kill_cursor_message(request_id_sv, cursor_id)
 * Serialize a MongoDB OP_KILL_CURSORS wire-protocol message that kills the
 * single cursor `cursor_id`. The message is assembled in a 128-byte stack
 * buffer (the fixed-size payload easily fits: 16-byte header + 4 + 4 + 8),
 * so no heap allocation or Safefree is needed. Returns the raw message
 * bytes as a Perl scalar.
 */
SV*
build_kill_cursor_message(request_id_sv,cursor_id)
SV *request_id_sv
SV *cursor_id
PREINIT:
buffer buf;
char quickbuf[128];
mongo_msg_header header;
CODE:
buf.pos = quickbuf; /* point the buffer at stack storage instead of the heap */
buf.start = buf.pos;
buf.end = buf.start + 128;
// std header
CREATE_MSG_HEADER(SvIV(request_id_sv), 0, OP_KILL_CURSORS);
APPEND_HEADER(buf, 0);
// # of cursors
perl_mongo_serialize_int(&buf, 1);
// cursor ids
perl_mongo_serialize_long(&buf, (int64_t)SvIV(cursor_id));
perl_mongo_serialize_size(buf.start, &buf); /* back-patch total length at offset 0 */
RETVAL = newSVpvn(buf.start, buf.pos-buf.start); /* copy bytes into a new SV */
OUTPUT:
RETVAL
/*
 * decode_bson_documents(documents)
 * Decode a Perl byte string containing zero or more concatenated BSON
 * documents into a reference to an array of the corresponding Perl data
 * structures. Ownership of the returned array reference passes to the
 * caller (mortalized by the XS return convention).
 */
SV*
decode_bson_documents(SV *documents)
PREINIT:
buffer buf;
AV *ret;
char *bson;
CODE:
ret = newAV ();
bson = SvPVbyte_nolen(documents);
buf.start = bson;
buf.end = buf.start + SvCUR(documents);
buf.pos = buf.start;
/* BUG FIX: the original used do/while, which attempted one decode even
 * when the input scalar was empty; a while loop yields an empty array
 * instead of reading past a zero-length buffer. Also dropped the unused
 * local `i` and commented-out debug warns. */
while (buf.pos < buf.end) {
SV *sv = perl_mongo_bson_to_sv(&buf); /* decode one document, advancing buf.pos */
av_push (ret, sv);
buf.start = buf.pos; /* next document begins where this one ended */
}
RETVAL = newRV_noinc ((SV *)ret); /* take ownership of ret via a non-incrementing ref */
OUTPUT:
RETVAL
( run in 1.004 second using v1.01-cache-2.11-cpan-39bf76dae61 )