Source Code for Module gluon.dal

    1  #!/usr/bin/env python 
    2  # -*- coding: utf-8 -*- 
    3   
    4  """ 
    5  This file is part of the web2py Web Framework 
    6  Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu> 
    7  License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) 
    8   
    9  Thanks to 
   10      * Niall Sweeny <niall.sweeny@fonjax.com> for MS SQL support 
   11      * Marcel Leuthi <mluethi@mlsystems.ch> for Oracle support 
   12      * Denes 
   13      * Chris Clark 
   14      * clach05 
   15      * Denes Lengyel 
   16      * and many others who have contributed to current and previous versions 
   17   
   18  This file contains the DAL support for many relational databases, 
   19  including: 
   20  - SQLite & SpatiaLite 
   21  - MySQL 
   22  - Postgres 
   23  - Firebird 
   24  - Oracle 
   25  - MS SQL 
   26  - DB2 
   27  - Interbase 
   28  - Ingres 
   29  - Informix (9+ and SE) 
   30  - SapDB (experimental) 
   31  - Cubrid (experimental) 
   32  - CouchDB (experimental) 
   33  - MongoDB (in progress) 
   34  - Google:nosql 
   35  - Google:sql 
   36  - Teradata 
   37  - IMAP (experimental) 
   38   
   39  Example of usage: 
   40   
   41  >>> # from dal import DAL, Field 
   42   
   43  ### create DAL connection (and create DB if it doesn't exist) 
   44  >>> db = DAL(('sqlite://storage.sqlite','mysql://a:b@localhost/x'), 
   45  ... folder=None) 
   46   
   47  ### define a table 'person' (create/alter as necessary) 
   48  >>> person = db.define_table('person',Field('name','string')) 
   49   
   50  ### insert a record 
   51  >>> id = person.insert(name='James') 
   52   
   53  ### retrieve it by id 
   54  >>> james = person(id) 
   55   
   56  ### retrieve it by name 
   57  >>> james = person(name='James') 
   58   
   59  ### retrieve it by arbitrary query 
   60  >>> query = (person.name=='James') & (person.name.startswith('J')) 
   61  >>> james = db(query).select(person.ALL)[0] 
   62   
   63  ### update one record 
   64  >>> james.update_record(name='Jim') 
   65  <Row {'id': 1, 'name': 'Jim'}> 
   66   
   67  ### update multiple records by query 
   68  >>> db(person.name.like('J%')).update(name='James') 
   69  1 
   70   
   71  ### delete records by query 
   72  >>> db(person.name.lower() == 'jim').delete() 
   73  0 
   74   
   75  ### retrieve multiple records (rows) 
   76  >>> people = db(person).select(orderby=person.name, 
   77  ... groupby=person.name, limitby=(0,100)) 
   78   
   79  ### further filter them 
   80  >>> james = people.find(lambda row: row.name == 'James').first() 
   81  >>> print james.id, james.name 
   82  1 James 
   83   
   84  ### check aggregates 
   85  >>> counter = person.id.count() 
   86  >>> print db(person).select(counter).first()(counter) 
   87  1 
   88   
   89  ### delete one record 
   90  >>> james.delete_record() 
   91  1 
   92   
   93  ### delete (drop) entire database table 
   94  >>> person.drop() 
   95   
   96  Supported field types: 
   97  id string text boolean integer double decimal password upload 
   98  blob time date datetime 
   99   
  100  Supported DAL URI strings: 
  101  'sqlite://test.db' 
  102  'spatialite://test.db' 
  103  'sqlite:memory' 
  104  'spatialite:memory' 
  105  'jdbc:sqlite://test.db' 
  106  'mysql://root:none@localhost/test' 
  107  'postgres://mdipierro:password@localhost/test' 
  108  'postgres:psycopg2://mdipierro:password@localhost/test' 
  109  'postgres:pg8000://mdipierro:password@localhost/test' 
  110  'jdbc:postgres://mdipierro:none@localhost/test' 
  111  'mssql://web2py:none@A64X2/web2py_test' 
  112  'mssql2://web2py:none@A64X2/web2py_test' # alternate mappings 
  113  'oracle://username:password@database' 
  114  'firebird://user:password@server:3050/database' 
  115  'db2://DSN=dsn;UID=user;PWD=pass' 
  116  'firebird://username:password@hostname/database' 
  117  'firebird_embedded://username:password@c://path' 
  118  'informix://user:password@server:3050/database' 
  119  'informixu://user:password@server:3050/database' # unicode informix 
  120  'ingres://database'  # or use an ODBC connection string, e.g. 'ingres://dsn=dsn_name' 
  121  'google:datastore' # for google app engine datastore 
  122  'google:sql' # for google app engine with sql (mysql compatible) 
  123  'teradata://DSN=dsn;UID=user;PWD=pass; DATABASE=database' # experimental 
  124  'imap://user:password@server:port' # experimental 
  125  'mongodb://user:password@server:port/database' # experimental 
  126   
  127  For more info: 
  128  help(DAL) 
  129  help(Field) 
  130  """ 
  131   
  132  ################################################################################### 
  133  # this file only exposes DAL and Field 
  134  ################################################################################### 
  135   
  136  __all__ = ['DAL', 'Field'] 
  137   
  138  DEFAULTLENGTH = {'string':512, 
  139                   'password':512, 
  140                   'upload':512, 
  141                   'text':2**15, 
  142                   'blob':2**31} 
  143  TIMINGSSIZE = 100 
  144  SPATIALLIBS = { 
  145      'Windows':'libspatialite', 
  146      'Linux':'libspatialite.so', 
  147      'Darwin':'libspatialite.dylib' 
  148      } 
  149  DEFAULT_URI = 'sqlite://dummy.db' 
  150   
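A note on DEFAULTLENGTH: when a Field is declared without an explicit length, the DAL falls back to these per-type defaults. Field itself is defined further down in this module; the illustrative values below are derived from the table above:

>>> # Field('name', 'string').length  ->  512
>>> # Field('bio', 'text').length     ->  32768  (2**15)
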
  151  import re 
  152  import sys 
  153  import locale 
  154  import os 
  155  import types 
  156  import datetime 
  157  import threading 
  158  import time 
  159  import csv 
  160  import cgi 
  161  import copy 
  162  import socket 
  163  import logging 
  164  import base64 
  165  import shutil 
  166  import marshal 
  167  import decimal 
  168  import struct 
  169  import urllib 
  170  import hashlib 
  171  import uuid 
  172  import glob 
  173  import traceback 
  174  import platform 
  175   
  176  PYTHON_VERSION = sys.version_info[0] 
  177  if PYTHON_VERSION == 2: 
  178      import cPickle as pickle 
  179      import cStringIO as StringIO 
  180      import copy_reg as copyreg 
  181      hashlib_md5 = hashlib.md5 
  182      bytes, unicode = str, unicode 
  183  else: 
  184      import pickle 
  185      from io import StringIO as StringIO 
  186      import copyreg 
  187      long = int 
  188      hashlib_md5 = lambda s: hashlib.md5(bytes(s,'utf8')) 
  189      bytes, unicode = bytes, str 
  190   
  191  CALLABLETYPES = (types.LambdaType, types.FunctionType, 
  192                   types.BuiltinFunctionType, 
  193                   types.MethodType, types.BuiltinMethodType) 
  194   
  195  TABLE_ARGS = set( 
  196      ('migrate','primarykey','fake_migrate','format','redefine', 
  197       'singular','plural','trigger_name','sequence_name','fields', 
  198       'common_filter','polymodel','table_class','on_define','actual_name')) 
  199   
  200  SELECT_ARGS = set( 
  201      ('orderby', 'groupby', 'limitby','required', 'cache', 'left', 
  202       'distinct', 'having', 'join','for_update', 'processor','cacheable', 'orderby_on_limitby')) 
  203   
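These two sets act as whitelists: keyword arguments to define_table and select are validated against them and anything else is rejected (see the 'invalid select attribute' check in BaseAdapter._select below). For example:

>>> # db(person).select(limitby=(0,10))   # accepted: 'limitby' is in SELECT_ARGS
>>> # db(person).select(limit=10)         # SyntaxError: invalid select attribute: limit
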
  204  ogetattr = object.__getattribute__ 
  205  osetattr = object.__setattr__ 
  206  exists = os.path.exists 
  207  pjoin = os.path.join 
  208   
  209  ################################################################################### 
  210  # following checks allow the use of dal without web2py, as a standalone module 
  211  ################################################################################### 
  212  try: 
  213      from gluon.utils import web2py_uuid 
  214  except (ImportError, SystemError): 
  215      import uuid 
  216      def web2py_uuid(): return str(uuid.uuid4())
  217  
  218  try:
  219      import portalocker
  220      have_portalocker = True
  221  except ImportError:
  222      have_portalocker = False
  223  
  224  try:
  225      from gluon import serializers
  226      have_serializers = True
  227  except ImportError:
  228      have_serializers = False
  229      try:
  230          import json as simplejson
  231      except ImportError:
  232          try:
  233              import gluon.contrib.simplejson as simplejson
  234          except ImportError:
  235              simplejson = None
  236  
  237  LOGGER = logging.getLogger("web2py.dal")
  238  DEFAULT = lambda:0
  239  
  240  GLOBAL_LOCKER = threading.RLock()
  241  THREAD_LOCAL = threading.local()
  242  
  243  # internal representation of tables with field
  244  # <table>.<field>, tables and fields may only be [a-zA-Z0-9_]
  245  
  246  REGEX_TYPE = re.compile('^([\w\_\:]+)')
  247  REGEX_DBNAME = re.compile('^(\w+)(\:\w+)*')
  248  REGEX_W = re.compile('^\w+$')
  249  REGEX_TABLE_DOT_FIELD = re.compile('^(\w+)\.(\w+)$')
  250  REGEX_UPLOAD_PATTERN = re.compile('(?P<table>[\w\-]+)\.(?P<field>[\w\-]+)\.(?P<uuidkey>[\w\-]+)(\.(?P<name>\w+))?\.\w+$')
  251  REGEX_CLEANUP_FN = re.compile('[\'"\s;]+')
  252  REGEX_UNPACK = re.compile('(?<!\|)\|(?!\|)')
  253  REGEX_PYTHON_KEYWORDS = re.compile('^(and|del|from|not|while|as|elif|global|or|with|assert|else|if|pass|yield|break|except|import|print|class|exec|in|raise|continue|finally|is|return|def|for|lambda|try)$')
  254  REGEX_SELECT_AS_PARSER = re.compile("\s+AS\s+(\S+)")
  255  REGEX_CONST_STRING = re.compile('(\"[^\"]*?\")|(\'[^\']*?\')')
  256  REGEX_SEARCH_PATTERN = re.compile('^{[^\.]+\.[^\.]+(\.(lt|gt|le|ge|eq|ne|contains|startswith|year|month|day|hour|minute|second))?(\.not)?}$')
  257  REGEX_SQUARE_BRACKETS = re.compile('^.+\[.+\]$')
  258  REGEX_STORE_PATTERN = re.compile('\.(?P<e>\w{1,5})$')
  259  REGEX_QUOTES = re.compile("'[^']*'")
  260  REGEX_ALPHANUMERIC = re.compile('^[0-9a-zA-Z]\w*$')
  261  REGEX_PASSWORD = re.compile('\://([^:@]*)\:')
  262  REGEX_NOPASSWD = re.compile('\/\/[\w\.\-]+[\:\/](.+)(?=@)') # was '(?<=[\:\/])([^:@/]+)(?=@.+)'
  263  
  264  # list of drivers will be built on the fly
  265  # and lists only what is available
  266  DRIVERS = []
  267  
  268  try:
  269      from new import classobj
  270      from google.appengine.ext import db as gae
  271      from google.appengine.api import namespace_manager, rdbms
  272      from google.appengine.api.datastore_types import Key  ### for belongs on ID
  273      from google.appengine.ext.db.polymodel import PolyModel
  274      DRIVERS.append('google')
  275  except ImportError:
  276      pass
  277  
  278  if not 'google' in DRIVERS:
  279  
  280      try:
  281          from pysqlite2 import dbapi2 as sqlite2
  282          DRIVERS.append('SQLite(sqlite2)')
  283      except ImportError:
  284          LOGGER.debug('no SQLite drivers pysqlite2.dbapi2')
  285  
  286      try:
  287          from sqlite3 import dbapi2 as sqlite3
  288          DRIVERS.append('SQLite(sqlite3)')
  289      except ImportError:
  290          LOGGER.debug('no SQLite drivers sqlite3')
  291  
  292      try:
  293          # first try contrib driver, then from site-packages (if installed)
  294          try:
  295              import gluon.contrib.pymysql as pymysql
  296              # monkeypatch pymysql because they haven't fixed the bug:
  297              # https://github.com/petehunt/PyMySQL/issues/86
  298              pymysql.ESCAPE_REGEX = re.compile("'")
  299              pymysql.ESCAPE_MAP = {"'": "''"}
  300              # end monkeypatch
  301          except ImportError:
  302              import pymysql
  303          DRIVERS.append('MySQL(pymysql)')
  304      except ImportError:
  305          LOGGER.debug('no MySQL driver pymysql')
  306  
  307      try:
  308          import MySQLdb
  309          DRIVERS.append('MySQL(MySQLdb)')
  310      except ImportError:
  311          LOGGER.debug('no MySQL driver MySQLDB')
  312  
  313      try:
  314          import mysql.connector as mysqlconnector
  315          DRIVERS.append("MySQL(mysqlconnector)")
  316      except ImportError:
  317          LOGGER.debug("no driver mysql.connector")
  318  
  319      try:
  320          import psycopg2
  321          from psycopg2.extensions import adapt as psycopg2_adapt
  322          DRIVERS.append('PostgreSQL(psycopg2)')
  323      except ImportError:
  324          LOGGER.debug('no PostgreSQL driver psycopg2')
  325  
  326      try:
  327          # first try contrib driver, then from site-packages (if installed)
  328          try:
  329              import gluon.contrib.pg8000.dbapi as pg8000
  330          except ImportError:
  331              import pg8000.dbapi as pg8000
  332          DRIVERS.append('PostgreSQL(pg8000)')
  333      except ImportError:
  334          LOGGER.debug('no PostgreSQL driver pg8000')
  335  
  336      try:
  337          import cx_Oracle
  338          DRIVERS.append('Oracle(cx_Oracle)')
  339      except ImportError:
  340          LOGGER.debug('no Oracle driver cx_Oracle')
  341  
  342      try:
  343          try:
  344              import pyodbc
  345          except ImportError:
  346              try:
  347                  import gluon.contrib.pypyodbc as pyodbc
  348              except Exception, e:
  349                  raise ImportError(str(e))
  350          DRIVERS.append('MSSQL(pyodbc)')
  351          DRIVERS.append('DB2(pyodbc)')
  352          DRIVERS.append('Teradata(pyodbc)')
  353          DRIVERS.append('Ingres(pyodbc)')
  354      except ImportError:
  355          LOGGER.debug('no MSSQL/DB2/Teradata/Ingres driver pyodbc')
  356  
  357      try:
  358          import Sybase
  359          DRIVERS.append('Sybase(Sybase)')
  360      except ImportError:
  361          LOGGER.debug('no Sybase driver')
  362  
  363      try:
  364          import kinterbasdb
  365          DRIVERS.append('Interbase(kinterbasdb)')
  366          DRIVERS.append('Firebird(kinterbasdb)')
  367      except ImportError:
  368          LOGGER.debug('no Firebird/Interbase driver kinterbasdb')
  369  
  370      try:
  371          import fdb
  372          DRIVERS.append('Firebird(fdb)')
  373      except ImportError:
  374          LOGGER.debug('no Firebird driver fdb')
  375      #####
  376      try:
  377          import firebirdsql
  378          DRIVERS.append('Firebird(firebirdsql)')
  379      except ImportError:
  380          LOGGER.debug('no Firebird driver firebirdsql')
  381  
  382      try:
  383          import informixdb
  384          DRIVERS.append('Informix(informixdb)')
  385          LOGGER.warning('Informix support is experimental')
  386      except ImportError:
  387          LOGGER.debug('no Informix driver informixdb')
  388  
  389      try:
  390          import sapdb
  391          DRIVERS.append('SQL(sapdb)')
  392          LOGGER.warning('SAPDB support is experimental')
  393      except ImportError:
  394          LOGGER.debug('no SAP driver sapdb')
  395  
  396      try:
  397          import cubriddb
  398          DRIVERS.append('Cubrid(cubriddb)')
  399          LOGGER.warning('Cubrid support is experimental')
  400      except ImportError:
  401          LOGGER.debug('no Cubrid driver cubriddb')
  402  
  403  try:
  404      from com.ziclix.python.sql import zxJDBC
  405      import java.sql
  406      # Try sqlite jdbc driver from http://www.zentus.com/sqlitejdbc/
  407      from org.sqlite import JDBC # required by java.sql; ensure we have it
  408      zxJDBC_sqlite = java.sql.DriverManager
  409      DRIVERS.append('PostgreSQL(zxJDBC)')
  410      DRIVERS.append('SQLite(zxJDBC)')
  411      LOGGER.warning('zxJDBC support is experimental')
  412      is_jdbc = True
  413  except ImportError:
  414      LOGGER.debug('no SQLite/PostgreSQL driver zxJDBC')
  415      is_jdbc = False
  416  
  417  try:
  418      import couchdb
  419      DRIVERS.append('CouchDB(couchdb)')
  420  except ImportError:
  421      LOGGER.debug('no Couchdb driver couchdb')
  422  
  423  try:
  424      import pymongo
  425      DRIVERS.append('MongoDB(pymongo)')
  426  except:
  427      LOGGER.debug('no MongoDB driver pymongo')
  428  
  429  try:
  430      import imaplib
  431      DRIVERS.append('IMAP(imaplib)')
  432  except:
  433      LOGGER.debug('no IMAP driver imaplib')
  434  
  435  PLURALIZE_RULES = [
  436      (re.compile('child$'), re.compile('child$'), 'children'),
  437      (re.compile('oot$'), re.compile('oot$'), 'eet'),
  438      (re.compile('ooth$'), re.compile('ooth$'), 'eeth'),
  439      (re.compile('l[eo]af$'), re.compile('l([eo])af$'), 'l\\1aves'),
  440      (re.compile('sis$'), re.compile('sis$'), 'ses'),
  441      (re.compile('man$'), re.compile('man$'), 'men'),
  442      (re.compile('ife$'), re.compile('ife$'), 'ives'),
  443      (re.compile('eau$'), re.compile('eau$'), 'eaux'),
  444      (re.compile('lf$'), re.compile('lf$'), 'lves'),
  445      (re.compile('[sxz]$'), re.compile('$'), 'es'),
  446      (re.compile('[^aeioudgkprt]h$'), re.compile('$'), 'es'),
  447      (re.compile('(qu|[^aeiou])y$'), re.compile('y$'), 'ies'),
  448      (re.compile('$'), re.compile('$'), 's'),
  449      ]
  450  
  451  def pluralize(singular, rules=PLURALIZE_RULES):
  452      for line in rules:
  453          re_search, re_sub, replace = line
  454          plural = re_search.search(singular) and re_sub.sub(replace, singular)
  455          if plural: return plural
456
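The rules are tried in order and the first match wins. Since pluralize depends only on PLURALIZE_RULES, it can be checked directly:

>>> pluralize('child')
'children'
>>> pluralize('query')
'queries'
>>> pluralize('person')
'persons'
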
  457  def hide_password(uri):
  458      if isinstance(uri,(list,tuple)):
  459          return [hide_password(item) for item in uri]
  460      return REGEX_NOPASSWD.sub('******',uri)
461
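hide_password is used when connection URIs are printed or logged. Note that REGEX_NOPASSWD replaces the whole matched span, i.e. the '//', the username and the password, not just the password:

>>> hide_password('postgres://user:secret@localhost/test')
'postgres:******@localhost/test'
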
  462  def OR(a,b):
  463      return a|b
  464  
  465  def AND(a,b):
  466      return a&b
  467  
  468  def IDENTITY(x): return x
469
  470  def varquote_aux(name,quotestr='%s'):
  471      return name if REGEX_W.match(name) else quotestr % name
472
  473  def quote_keyword(a,keyword='timestamp'):
  474      regex = re.compile('\.%s(?=\w)' % keyword)  # match ".<keyword>" in a table.field reference
  475      a = regex.sub('."%s"' % keyword,a)
  476      return a
  477  
  478  if 'google' in DRIVERS:
  479  
  480      is_jdbc = False
  481  
  482      class GAEDecimalProperty(gae.Property):
  483          """
  484          GAE decimal implementation
  485          """
  486          data_type = decimal.Decimal
  487  
  488          def __init__(self, precision, scale, **kwargs):
  489              super(GAEDecimalProperty, self).__init__(**kwargs)
  490              d = '1.'
  491              for x in range(scale):
  492                  d += '0'
  493              self.round = decimal.Decimal(d)
  494  
  495          def get_value_for_datastore(self, model_instance):
  496              value = super(GAEDecimalProperty, self)\
  497                  .get_value_for_datastore(model_instance)
  498              if value is None or value == '':
  499                  return None
  500              else:
  501                  return str(value)
  502  
  503          def make_value_from_datastore(self, value):
  504              if value is None or value == '':
  505                  return None
  506              else:
  507                  return decimal.Decimal(value).quantize(self.round)
  508  
  509          def validate(self, value):
  510              value = super(GAEDecimalProperty, self).validate(value)
  511              if value is None or isinstance(value, decimal.Decimal):
  512                  return value
  513              elif isinstance(value, basestring):
  514                  return decimal.Decimal(value)
  515              raise gae.BadValueError("Property %s must be a Decimal or string."\
  516                                          % self.name)
517
  518  ###################################################################################
  519  # class that handles connection pooling (all adapters are derived from this one)
  520  ###################################################################################
  521  
  522  class ConnectionPool(object):
  523  
  524      POOLS = {}
  525      check_active_connection = True
  526  
  527      @staticmethod
  528      def set_folder(folder):
  529          THREAD_LOCAL.folder = folder
  530  
  531      # ## this allows gluon to commit/rollback all dbs in this thread
  532  
  533      def close(self,action='commit',really=True):
  534          if action:
  535              if callable(action):
  536                  action(self)
  537              else:
  538                  getattr(self, action)()
  539          # ## if you want pools, recycle this connection
  540          if self.pool_size:
  541              GLOBAL_LOCKER.acquire()
  542              pool = ConnectionPool.POOLS[self.uri]
  543              if len(pool) < self.pool_size:
  544                  pool.append(self.connection)
  545                  really = False
  546              GLOBAL_LOCKER.release()
  547          if really:
  548              self.close_connection()
  549          self.connection = None
  550  
  551      @staticmethod
  552      def close_all_instances(action):
  553          """ to close cleanly databases in a multithreaded environment """
  554          dbs = getattr(THREAD_LOCAL,'db_instances',{}).items()
  555          for db_uid, db_group in dbs:
  556              for db in db_group:
  557                  if hasattr(db,'_adapter'):
  558                      db._adapter.close(action)
  559          getattr(THREAD_LOCAL,'db_instances',{}).clear()
  560          getattr(THREAD_LOCAL,'db_instances_zombie',{}).clear()
  561          if callable(action):
  562              action(None)
  563          return
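close_all_instances is how the web2py framework finalizes every open connection at the end of a request; standalone users can call it directly. A minimal sketch:

>>> # ConnectionPool.close_all_instances('commit')     # on success
>>> # ConnectionPool.close_all_instances('rollback')   # on error
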
564
  565      def find_or_make_work_folder(self):
  566          """ this does not actually make the folder; it has to be there already """
  567          self.folder = getattr(THREAD_LOCAL,'folder','')
  568  
  569          if (os.path.isabs(self.folder) and
  570              isinstance(self, UseDatabaseStoredFile) and
  571              self.folder.startswith(os.getcwd())):
  572              self.folder = os.path.relpath(self.folder, os.getcwd())
  573  
  574          # Creating the folder if it does not exist (currently disabled)
  575          if False and self.folder and not exists(self.folder):
  576              os.mkdir(self.folder)
577
  578      def after_connection_hook(self):
  579          """hook for the after_connection parameter"""
  580          if callable(self._after_connection):
  581              self._after_connection(self)
  582          self.after_connection()
583
  584      def after_connection(self):
  585          """ this is supposed to be overloaded by adapters """
  586          pass
587
  588      def reconnect(self, f=None, cursor=True):
  589          """
  590          this function defines: self.connection and self.cursor
  591          (iff cursor is True)
  592          if self.pool_size>0 it will try to pull the connection from the pool
  593          if the connection is not active (closed by db server) it will loop
  594          if not self.pool_size or no active connections in pool makes a new one
  595          """
  596          if getattr(self,'connection', None) != None:
  597              return
  598          if f is None:
  599              f = self.connector
  600  
  601          # if not hasattr(self, "driver") or self.driver is None:
  602          #     LOGGER.debug("Skipping connection since there's no driver")
  603          #     return
  604  
  605          if not self.pool_size:
  606              self.connection = f()
  607              self.cursor = cursor and self.connection.cursor()
  608          else:
  609              uri = self.uri
  610              POOLS = ConnectionPool.POOLS
  611              while True:
  612                  GLOBAL_LOCKER.acquire()
  613                  if not uri in POOLS:
  614                      POOLS[uri] = []
  615                  if POOLS[uri]:
  616                      self.connection = POOLS[uri].pop()
  617                      GLOBAL_LOCKER.release()
  618                      self.cursor = cursor and self.connection.cursor()
  619                      try:
  620                          if self.cursor and self.check_active_connection:
  621                              self.execute('SELECT 1;')
  622                          break
  623                      except:
  624                          pass
  625                  else:
  626                      GLOBAL_LOCKER.release()
  627                      self.connection = f()
  628                      self.cursor = cursor and self.connection.cursor()
  629                      break
  630          self.after_connection_hook()
631
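reconnect is where DAL's pool_size argument takes effect: a pooled connection is probed with 'SELECT 1;' and silently discarded and replaced if the server has dropped it. Enabling pooling is just (URI illustrative):

>>> # db = DAL('postgres://user:password@localhost/test', pool_size=10)
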
  632  
  633  ###################################################################################
  634  # this is a generic adapter that does nothing; all others are derived from this one
  635  ###################################################################################
  636  
  637  class BaseAdapter(ConnectionPool):
  638      native_json = False
  639      driver = None
  640      driver_name = None
  641      drivers = () # list of drivers from which to pick
  642      connection = None
  643      commit_on_alter_table = False
  644      support_distributed_transaction = False
  645      uploads_in_blob = False
  646      can_select_for_update = True
  647      dbpath = None
  648      folder = None
  649  
  650      TRUE = 'T'
  651      FALSE = 'F'
  652      T_SEP = ' '
  653      QUOTE_TEMPLATE = '"%s"'
  654  
  655      types = {
  656          'boolean': 'CHAR(1)',
  657          'string': 'CHAR(%(length)s)',
  658          'text': 'TEXT',
  659          'json': 'TEXT',
  660          'password': 'CHAR(%(length)s)',
  661          'blob': 'BLOB',
  662          'upload': 'CHAR(%(length)s)',
  663          'integer': 'INTEGER',
  664          'bigint': 'INTEGER',
  665          'float':'DOUBLE',
  666          'double': 'DOUBLE',
  667          'decimal': 'DOUBLE',
  668          'date': 'DATE',
  669          'time': 'TIME',
  670          'datetime': 'TIMESTAMP',
  671          'id': 'INTEGER PRIMARY KEY AUTOINCREMENT',
  672          'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
  673          'list:integer': 'TEXT',
  674          'list:string': 'TEXT',
  675          'list:reference': 'TEXT',
  676          # the two below are only used when DAL(...bigint_id=True) and replace 'id','reference'
  677          'big-id': 'BIGINT PRIMARY KEY AUTOINCREMENT',
  678          'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
  679          }
  680  
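Dialect adapters override this mapping, and the %(...)s placeholders are filled in create_table below. Two derived examples (assuming the default ondelete='CASCADE'):

>>> # Field('name', 'string', length=128)  ->  'CHAR(128)'
>>> # Field('owner', 'reference person')   ->  'INTEGER REFERENCES person (id) ON DELETE CASCADE'
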
  681      def isOperationalError(self,exception):
  682          if not hasattr(self.driver, "OperationalError"):
  683              return None
  684          return isinstance(exception, self.driver.OperationalError)
  685  
  686      def isProgrammingError(self,exception):
  687          if not hasattr(self.driver, "ProgrammingError"):
  688              return None
  689          return isinstance(exception, self.driver.ProgrammingError)
690
  691      def id_query(self, table):
  692          pkeys = getattr(table,'_primarykey',None)
  693          if pkeys:
  694              return table[pkeys[0]] != None
  695          else:
  696              return table._id != None
697
  698      def adapt(self, obj):
  699          return "'%s'" % obj.replace("'", "''")
  700  
  701      def smart_adapt(self, obj):
  702          if isinstance(obj,(int,float)):
  703              return str(obj)
  704          return self.adapt(str(obj))
705
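adapt implements the standard quote-doubling escape for SQL string literals; smart_adapt leaves numbers unquoted. Given any adapter instance (here called adapter for illustration):

>>> # adapter.adapt("O'Reilly")    ->  "'O''Reilly'"
>>> # adapter.smart_adapt(3.14)    ->  '3.14'
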
  706      def file_exists(self, filename):
  707          """
  708          to be used ONLY for files that on GAE may not be on filesystem
  709          """
  710          return exists(filename)
  711  
  712      def file_open(self, filename, mode='rb', lock=True):
  713          """
  714          to be used ONLY for files that on GAE may not be on filesystem
  715          """
  716          if have_portalocker and lock:
  717              fileobj = portalocker.LockedFile(filename,mode)
  718          else:
  719              fileobj = open(filename,mode)
  720          return fileobj
  721  
  722      def file_close(self, fileobj):
  723          """
  724          to be used ONLY for files that on GAE may not be on filesystem
  725          """
  726          if fileobj:
  727              fileobj.close()
  728  
  729      def file_delete(self, filename):
  730          os.unlink(filename)
731
  732      def find_driver(self,adapter_args,uri=None):
  733          self.adapter_args = adapter_args
  734          if getattr(self,'driver',None) != None:
  735              return
  736          drivers_available = [driver for driver in self.drivers
  737                               if driver in globals()]
  738          if uri:
  739              items = uri.split('://',1)[0].split(':')
  740              request_driver = items[1] if len(items)>1 else None
  741          else:
  742              request_driver = None
  743          request_driver = request_driver or adapter_args.get('driver')
  744          if request_driver:
  745              if request_driver in drivers_available:
  746                  self.driver_name = request_driver
  747                  self.driver = globals().get(request_driver)
  748              else:
  749                  raise RuntimeError("driver %s not available" % request_driver)
  750          elif drivers_available:
  751              self.driver_name = drivers_available[0]
  752              self.driver = globals().get(self.driver_name)
  753          else:
  754              raise RuntimeError("no driver available %s" % str(self.drivers))
755
  756      def log(self, message, table=None):
  757          """ Logs migrations
  758  
  759          It will not log changes if logfile is not specified. Defaults
  760          to sql.log
  761          """
  762  
  763          isabs = None
  764          logfilename = self.adapter_args.get('logfile','sql.log')
  765          writelog = bool(logfilename)
  766          if writelog:
  767              isabs = os.path.isabs(logfilename)
  768  
  769          if table and table._dbt and writelog and self.folder:
  770              if isabs:
  771                  table._loggername = logfilename
  772              else:
  773                  table._loggername = pjoin(self.folder, logfilename)
  774              logfile = self.file_open(table._loggername, 'a')
  775              logfile.write(message)
  776              self.file_close(logfile)
  777  
  778  
  779      def __init__(self, db,uri,pool_size=0, folder=None, db_codec='UTF-8',
  780                   credential_decoder=IDENTITY, driver_args={},
  781                   adapter_args={},do_connect=True, after_connection=None):
  782          self.db = db
  783          self.dbengine = "None"
  784          self.uri = uri
  785          self.pool_size = pool_size
  786          self.folder = folder
  787          self.db_codec = db_codec
  788          self._after_connection = after_connection
  789          class Dummy(object):
  790              lastrowid = 1
  791              def __getattr__(self, value):
  792                  return lambda *a, **b: []
  793          self.connection = Dummy()
  794          self.cursor = Dummy()
  795  
  796      def sequence_name(self,tablename):
  797          return '%s_sequence' % tablename
  798  
  799      def trigger_name(self,tablename):
  800          return '%s_sequence' % tablename
  801  
  802      def varquote(self,name):
  803          return name
804
  805      def create_table(self, table,
  806                       migrate=True,
  807                       fake_migrate=False,
  808                       polymodel=None):
  809          db = table._db
  810          fields = []
  811          # PostGIS geo fields are added after the table has been created
  812          postcreation_fields = []
  813          sql_fields = {}
  814          sql_fields_aux = {}
  815          TFK = {}
  816          tablename = table._tablename
  817          sortable = 0
  818          types = self.types
  819          for field in table:
  820              sortable += 1
  821              field_name = field.name
  822              field_type = field.type
  823              if isinstance(field_type,SQLCustomType):
  824                  ftype = field_type.native or field_type.type
  825              elif field_type.startswith('reference'):
  826                  referenced = field_type[10:].strip()
  827                  if referenced == '.':
  828                      referenced = tablename
  829                  constraint_name = self.constraint_name(tablename, field_name)
  830                  if not '.' in referenced \
  831                          and referenced != tablename \
  832                          and hasattr(table,'_primarykey'):
  833                      ftype = types['integer']
  834                  else:
  835                      if hasattr(table,'_primarykey'):
  836                          rtablename,rfieldname = referenced.split('.')
  837                          rtable = db[rtablename]
  838                          rfield = rtable[rfieldname]
  839                          # must be PK reference or unique
  840                          if rfieldname in rtable._primarykey or \
  841                                  rfield.unique:
  842                              ftype = types[rfield.type[:9]] % \
  843                                  dict(length=rfield.length)
  844                              # multicolumn primary key reference?
  845                              if not rfield.unique and len(rtable._primarykey)>1:
  846                                  # then it has to be a table level FK
  847                                  if rtablename not in TFK:
  848                                      TFK[rtablename] = {}
  849                                  TFK[rtablename][rfieldname] = field_name
  850                              else:
  851                                  ftype = ftype + \
  852                                      types['reference FK'] % dict(
  853                                          constraint_name = constraint_name, # should be quoted
  854                                          foreign_key = '%s (%s)' % (rtablename,
  855                                                                     rfieldname),
  856                                          table_name = tablename,
  857                                          field_name = field_name,
  858                                          on_delete_action=field.ondelete)
  859                      else:
  860                          # make a guess here for circular references
  861                          if referenced in db:
  862                              id_fieldname = db[referenced]._id.name
  863                          elif referenced == tablename:
  864                              id_fieldname = table._id.name
  865                          else: #make a guess
  866                              id_fieldname = 'id'
  867                          ftype = types[field_type[:9]] % dict(
  868                              index_name = field_name+'__idx',
  869                              field_name = field_name,
  870                              constraint_name = constraint_name,
  871                              foreign_key = '%s (%s)' % (referenced,
  872                                                         id_fieldname),
  873                              on_delete_action=field.ondelete)
  874              elif field_type.startswith('list:reference'):
  875                  ftype = types[field_type[:14]]
  876              elif field_type.startswith('decimal'):
  877                  precision, scale = map(int,field_type[8:-1].split(','))
  878                  ftype = types[field_type[:7]] % \
  879                      dict(precision=precision,scale=scale)
  880              elif field_type.startswith('geo'):
  881                  if not hasattr(self,'srid'):
  882                      raise RuntimeError('Adapter does not support geometry')
  883                  srid = self.srid
  884                  geotype, parms = field_type[:-1].split('(')
  885                  if not geotype in types:
  886                      raise SyntaxError(
  887                          'Field: unknown field type: %s for %s' \
  888                          % (field_type, field_name))
  889                  ftype = types[geotype]
  890                  if self.dbengine == 'postgres' and geotype == 'geometry':
  891                      # parameters: schema, srid, dimension
  892                      dimension = 2 # GIS.dimension ???
  893                      parms = parms.split(',')
  894                      if len(parms) == 3:
  895                          schema, srid, dimension = parms
  896                      elif len(parms) == 2:
  897                          schema, srid = parms
  898                      else:
  899                          schema = parms[0]
  900                      ftype = "SELECT AddGeometryColumn ('%%(schema)s', '%%(tablename)s', '%%(fieldname)s', %%(srid)s, '%s', %%(dimension)s);" % types[geotype]
  901                      ftype = ftype % dict(schema=schema,
  902                                           tablename=tablename,
  903                                           fieldname=field_name, srid=srid,
  904                                           dimension=dimension)
  905                      postcreation_fields.append(ftype)
  906              elif not field_type in types:
  907                  raise SyntaxError('Field: unknown field type: %s for %s' % \
  908                      (field_type, field_name))
  909              else:
  910                  ftype = types[field_type]\
  911                      % dict(length=field.length)
  912              if not field_type.startswith('id') and \
  913                      not field_type.startswith('reference'):
  914                  if field.notnull:
  915                      ftype += ' NOT NULL'
  916                  else:
  917                      ftype += self.ALLOW_NULL()
  918                  if field.unique:
  919                      ftype += ' UNIQUE'
  920                  if field.custom_qualifier:
  921                      ftype += ' %s' % field.custom_qualifier
  922  
  923              # add to list of fields
  924              sql_fields[field_name] = dict(
  925                  length=field.length,
  926                  unique=field.unique,
  927                  notnull=field.notnull,
  928                  sortable=sortable,
  929                  type=str(field_type),
  930                  sql=ftype)
  931  
  932              if field.notnull and not field.default is None:
  933                  # Caveat: sql_fields and sql_fields_aux
  934                  # differ for default values.
  935                  # sql_fields is used to trigger migrations and sql_fields_aux
  936                  # is used for create tables.
  937                  # The reason is that we do not want to trigger
  938                  # a migration simply because a default value changes.
  939                  not_null = self.NOT_NULL(field.default, field_type)
  940                  ftype = ftype.replace('NOT NULL', not_null)
  941              sql_fields_aux[field_name] = dict(sql=ftype)
  942              # Postgres - PostGIS:
  943              # geometry fields are added after the table has been created, not now
  944              if not (self.dbengine == 'postgres' and \
  945                      field_type.startswith('geom')):
  946                  fields.append('%s %s' % (field_name, ftype))
  947          other = ';'
  948  
  949          # backend-specific extensions to fields
  950          if self.dbengine == 'mysql':
  951              if not hasattr(table, "_primarykey"):
  952                  fields.append('PRIMARY KEY(%s)' % table._id.name)
  953              other = ' ENGINE=InnoDB CHARACTER SET utf8;'
  954  
  955          fields = ',\n    '.join(fields)
  956          for rtablename in TFK:
  957              rfields = TFK[rtablename]
  958              pkeys = db[rtablename]._primarykey
  959              fkeys = [ rfields[k] for k in pkeys ]
  960              fields = fields + ',\n    ' + \
  961                  types['reference TFK'] % dict(
  962                      table_name = tablename,
  963                      field_name=', '.join(fkeys),
  964                      foreign_table = rtablename,
  965                      foreign_key = ', '.join(pkeys),
  966                      on_delete_action = field.ondelete)
  967  
  968          if getattr(table,'_primarykey',None):
  969              query = "CREATE TABLE %s(\n    %s,\n    %s) %s" % \
  970                  (tablename, fields,
  971                   self.PRIMARY_KEY(', '.join(table._primarykey)),other)
  972          else:
  973              query = "CREATE TABLE %s(\n    %s\n)%s" % \
  974                  (tablename, fields, other)
  975  
  976          if self.uri.startswith('sqlite:///') \
  977                  or self.uri.startswith('spatialite:///'):
  978              path_encoding = sys.getfilesystemencoding() \
  979                  or locale.getdefaultlocale()[1] or 'utf8'
  980              dbpath = self.uri[9:self.uri.rfind('/')]\
  981                  .decode('utf8').encode(path_encoding)
  982          else:
  983              dbpath = self.folder
  984  
  985          if not migrate:
  986              return query
  987          elif self.uri.startswith('sqlite:memory')\
  988                  or self.uri.startswith('spatialite:memory'):
  989              table._dbt = None
  990          elif isinstance(migrate, str):
  991              table._dbt = pjoin(dbpath, migrate)
  992          else:
  993              table._dbt = pjoin(
  994                  dbpath, '%s_%s.table' % (table._db._uri_hash, tablename))
  995  
  996          if not table._dbt or not self.file_exists(table._dbt):
  997              if table._dbt:
  998                  self.log('timestamp: %s\n%s\n'
  999                           % (datetime.datetime.today().isoformat(),
 1000                              query), table)
 1001              if not fake_migrate:
 1002                  self.create_sequence_and_triggers(query,table)
 1003                  table._db.commit()
 1004                  # Postgres geom fields are added now,
 1005                  # after the table has been created
 1006                  for query in postcreation_fields:
 1007                      self.execute(query)
 1008                      table._db.commit()
 1009              if table._dbt:
 1010                  tfile = self.file_open(table._dbt, 'w')
 1011                  pickle.dump(sql_fields, tfile)
 1012                  self.file_close(tfile)
 1013                  if fake_migrate:
 1014                      self.log('faked!\n', table)
 1015                  else:
 1016                      self.log('success!\n', table)
 1017          else:
 1018              tfile = self.file_open(table._dbt, 'r')
 1019              try:
 1020                  sql_fields_old = pickle.load(tfile)
 1021              except EOFError:
 1022                  self.file_close(tfile)
 1023                  raise RuntimeError('File %s appears corrupted' % table._dbt)
 1024              self.file_close(tfile)
 1025              if sql_fields != sql_fields_old:
 1026                  self.migrate_table(table,
 1027                                     sql_fields, sql_fields_old,
 1028                                     sql_fields_aux, None,
 1029                                     fake_migrate=fake_migrate)
 1030          return query
1031
 1032      def migrate_table(
 1033          self,
 1034          table,
 1035          sql_fields,
 1036          sql_fields_old,
 1037          sql_fields_aux,
 1038          logfile,
 1039          fake_migrate=False,
 1040          ):
 1041  
 1042          # logfile is deprecated (moved to adapter.log method)
 1043          db = table._db
 1044          db._migrated.append(table._tablename)
 1045          tablename = table._tablename
 1046          def fix(item):
 1047              k,v=item
 1048              if not isinstance(v,dict):
 1049                  v=dict(type='unknown',sql=v)
 1050              return k.lower(),v
 1051          # make sure all field names are lower case to avoid
 1052          # migrations because of case change
 1053          sql_fields = dict(map(fix,sql_fields.iteritems()))
 1054          sql_fields_old = dict(map(fix,sql_fields_old.iteritems()))
 1055          sql_fields_aux = dict(map(fix,sql_fields_aux.iteritems()))
 1056          if db._debug:
 1057              logging.debug('migrating %s to %s' % (sql_fields_old,sql_fields))
 1058  
 1059          keys = sql_fields.keys()
 1060          for key in sql_fields_old:
 1061              if not key in keys:
 1062                  keys.append(key)
 1063          new_add = self.concat_add(tablename)
 1064  
 1065          metadata_change = False
 1066          sql_fields_current = copy.copy(sql_fields_old)
 1067          for key in keys:
 1068              query = None
 1069              if not key in sql_fields_old:
 1070                  sql_fields_current[key] = sql_fields[key]
 1071                  if self.dbengine in ('postgres',) and \
 1072                     sql_fields[key]['type'].startswith('geometry'):
 1073                      # 'sql' == ftype in sql
 1074                      query = [ sql_fields[key]['sql'] ]
 1075                  else:
 1076                      query = ['ALTER TABLE %s ADD %s %s;' % \
 1077                               (tablename, key,
 1078                                sql_fields_aux[key]['sql'].replace(', ', new_add))]
 1079                  metadata_change = True
 1080              elif self.dbengine in ('sqlite', 'spatialite'):
 1081                  if key in sql_fields:
 1082                      sql_fields_current[key] = sql_fields[key]
 1083                  metadata_change = True
 1084              elif not key in sql_fields:
 1085                  del sql_fields_current[key]
 1086                  ftype = sql_fields_old[key]['type']
 1087                  if (self.dbengine in ('postgres',) and
 1088                      ftype.startswith('geometry')):
 1089                      geotype, parms = ftype[:-1].split('(')
 1090                      schema = parms.split(',')[0]
 1091                      query = [ "SELECT DropGeometryColumn ('%(schema)s', "+
 1092                                "'%(table)s', '%(field)s');" %
 1093                                dict(schema=schema, table=tablename, field=key,) ]
 1094                  elif self.dbengine in ('firebird',):
 1095                      query = ['ALTER TABLE %s DROP %s;' % (tablename, key)]
 1096                  else:
 1097                      query = ['ALTER TABLE %s DROP COLUMN %s;' %
 1098                               (tablename, key)]
 1099                  metadata_change = True
 1100              elif sql_fields[key]['sql'] != sql_fields_old[key]['sql'] \
 1101                    and not (key in table.fields and
 1102                             isinstance(table[key].type, SQLCustomType)) \
 1103                    and not sql_fields[key]['type'].startswith('reference')\
 1104                    and not sql_fields[key]['type'].startswith('double')\
 1105                    and not sql_fields[key]['type'].startswith('id'):
 1106                  sql_fields_current[key] = sql_fields[key]
 1107                  t = tablename
 1108                  tt = sql_fields_aux[key]['sql'].replace(', ', new_add)
 1109                  if self.dbengine in ('firebird',):
 1110                      drop_expr = 'ALTER TABLE %s DROP %s;'
 1111                  else:
 1112                      drop_expr = 'ALTER TABLE %s DROP COLUMN %s;'
 1113                  key_tmp = key + '__tmp'
 1114                  query = ['ALTER TABLE %s ADD %s %s;' % (t, key_tmp, tt),
 1115                           'UPDATE %s SET %s=%s;' % (t, key_tmp, key),
 1116                           drop_expr % (t, key),
 1117                           'ALTER TABLE %s ADD %s %s;' % (t, key, tt),
 1118                           'UPDATE %s SET %s=%s;' % (t, key, key_tmp),
 1119                           drop_expr % (t, key_tmp)]
 1120                  metadata_change = True
 1121              elif sql_fields[key]['type'] != sql_fields_old[key]['type']:
 1122                  sql_fields_current[key] = sql_fields[key]
 1123                  metadata_change = True
 1124  
 1125              if query:
 1126                  self.log('timestamp: %s\n'
 1127                           % datetime.datetime.today().isoformat(), table)
 1128                  db['_lastsql'] = '\n'.join(query)
 1129                  for sub_query in query:
 1130                      self.log(sub_query + '\n', table)
 1131                      if fake_migrate:
 1132                          if db._adapter.commit_on_alter_table:
 1133                              self.save_dbt(table,sql_fields_current)
 1134                          self.log('faked!\n', table)
 1135                      else:
 1136                          self.execute(sub_query)
 1137                          # Caveat: mysql, oracle and firebird
 1138                          # do not allow multiple alter table
 1139                          # in one transaction so we must commit
 1140                          # partial transactions and
 1141                          # update table._dbt after alter table.
 1142                          if db._adapter.commit_on_alter_table:
 1143                              db.commit()
 1144                              self.save_dbt(table,sql_fields_current)
 1145                              self.log('success!\n', table)
 1146  
 1147              elif metadata_change:
 1148                  self.save_dbt(table,sql_fields_current)
 1149  
 1150          if metadata_change and not (query and db._adapter.commit_on_alter_table):
 1151              db.commit()
 1152              self.save_dbt(table,sql_fields_current)
 1153              self.log('success!\n', table)
 1154  
 1155      def save_dbt(self,table, sql_fields_current):
 1156          tfile = self.file_open(table._dbt, 'w')
 1157          pickle.dump(sql_fields_current, tfile)
 1158          self.file_close(tfile)
1159
 1160      def LOWER(self, first):
 1161          return 'LOWER(%s)' % self.expand(first)
 1162  
 1163      def UPPER(self, first):
 1164          return 'UPPER(%s)' % self.expand(first)
 1165  
 1166      def COUNT(self, first, distinct=None):
 1167          return ('COUNT(%s)' if not distinct else 'COUNT(DISTINCT %s)') \
 1168              % self.expand(first)
1169
 1170      def EXTRACT(self, first, what):
 1171          return "EXTRACT(%s FROM %s)" % (what, self.expand(first))
 1172  
 1173      def EPOCH(self, first):
 1174          return self.EXTRACT(first, 'epoch')
 1175  
 1176      def LENGTH(self, first):
 1177          return "LENGTH(%s)" % self.expand(first)
 1178  
 1179      def AGGREGATE(self, first, what):
 1180          return "%s(%s)" % (what, self.expand(first))
1181
 1182      def JOIN(self):
 1183          return 'JOIN'
 1184  
 1185      def LEFT_JOIN(self):
 1186          return 'LEFT JOIN'
 1187  
 1188      def RANDOM(self):
 1189          return 'Random()'
1190
 1191      def NOT_NULL(self, default, field_type):
 1192          return 'NOT NULL DEFAULT %s' % self.represent(default,field_type)
 1193  
 1194      def COALESCE(self, first, second):
 1195          expressions = [self.expand(first)]+[self.expand(e) for e in second]
 1196          return 'COALESCE(%s)' % ','.join(expressions)
 1197  
 1198      def COALESCE_ZERO(self, first):
 1199          return 'COALESCE(%s,0)' % self.expand(first)
 1200  
 1201      def RAW(self, first):
 1202          return first
 1203  
 1204      def ALLOW_NULL(self):
 1205          return ''
 1206  
 1207      def SUBSTRING(self, field, parameters):
 1208          return 'SUBSTR(%s,%s,%s)' % (self.expand(field), parameters[0], parameters[1])
 1209  
 1210      def PRIMARY_KEY(self, key):
 1211          return 'PRIMARY KEY(%s)' % key
1212
 1213      def _drop(self, table, mode):
 1214          return ['DROP TABLE %s;' % table]
 1215  
 1216      def drop(self, table, mode=''):
 1217          db = table._db
 1218          queries = self._drop(table, mode)
 1219          for query in queries:
 1220              if table._dbt:
 1221                  self.log(query + '\n', table)
 1222              self.execute(query)
 1223          db.commit()
 1224          del db[table._tablename]
 1225          del db.tables[db.tables.index(table._tablename)]
 1226          db._remove_references_to(table)
 1227          if table._dbt:
 1228              self.file_delete(table._dbt)
 1229              self.log('success!\n', table)
 1230  
 1231      def _insert(self, table, fields):
 1232          if fields:
 1233              keys = ','.join(f.name for f, v in fields)
 1234              values = ','.join(self.expand(v, f.type) for f, v in fields)
 1235              return 'INSERT INTO %s(%s) VALUES (%s);' % (table, keys, values)
 1236          else:
 1237              return self._insert_empty(table)
 1238  
 1239      def _insert_empty(self, table):
 1240          return 'INSERT INTO %s DEFAULT VALUES;' % table
1241
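As elsewhere in the DAL, the underscore method returns the SQL string without executing it. Continuing the docstring's person example:

>>> # person._insert(name='James')
>>> # "INSERT INTO person(name) VALUES ('James');"
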
 1242      def insert(self, table, fields):
 1243          query = self._insert(table,fields)
 1244          try:
 1245              self.execute(query)
 1246          except Exception:
 1247              e = sys.exc_info()[1]
 1248              if hasattr(table,'_on_insert_error'):
 1249                  return table._on_insert_error(table,fields,e)
 1250              raise e
 1251          if hasattr(table,'_primarykey'):
 1252              return dict([(k[0].name, k[1]) for k in fields \
 1253                               if k[0].name in table._primarykey])
 1254          id = self.lastrowid(table)
 1255          if not isinstance(id,int):
 1256              return id
 1257          rid = Reference(id)
 1258          (rid._table, rid._record) = (table, None)
 1259          return rid
 1260  
 1261      def bulk_insert(self, table, items):
 1262          return [self.insert(table,item) for item in items]
1263
 1264      def NOT(self, first):
 1265          return '(NOT %s)' % self.expand(first)
 1266  
 1267      def AND(self, first, second):
 1268          return '(%s AND %s)' % (self.expand(first), self.expand(second))
 1269  
 1270      def OR(self, first, second):
 1271          return '(%s OR %s)' % (self.expand(first), self.expand(second))
 1272  
 1273      def BELONGS(self, first, second):
 1274          if isinstance(second, str):
 1275              return '(%s IN (%s))' % (self.expand(first), second[:-1])
 1276          if not second:
 1277              return '(1=0)'
 1278          items = ','.join(self.expand(item, first.type) for item in second)
 1279          return '(%s IN (%s))' % (self.expand(first), items)
1280
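BELONGS backs the .belongs() query operator; an empty candidate list short-circuits to the always-false '(1=0)'. Continuing the docstring's person example:

>>> # db(person.id.belongs([1, 2, 3])).select()
>>> # generates: ... WHERE (person.id IN (1,2,3)) ...
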
 1281      def REGEXP(self, first, second):
 1282          "regular expression operator"
 1283          raise NotImplementedError
 1284  
 1285      def LIKE(self, first, second):
 1286          "case sensitive like operator"
 1287          raise NotImplementedError
 1288  
 1289      def ILIKE(self, first, second):
 1290          "case insensitive like operator"
 1291          return '(%s LIKE %s)' % (self.expand(first),
 1292                                   self.expand(second, 'string'))
 1293  
 1294      def STARTSWITH(self, first, second):
 1295          return '(%s LIKE %s)' % (self.expand(first),
 1296                                   self.expand(second+'%', 'string'))
 1297  
 1298      def ENDSWITH(self, first, second):
 1299          return '(%s LIKE %s)' % (self.expand(first),
 1300                                   self.expand('%'+second, 'string'))
 1301  
 1302      def CONTAINS(self,first,second,case_sensitive=False):
 1303          if first.type in ('string','text', 'json'):
 1304              if isinstance(second,Expression):
 1305                  second = Expression(None,self.CONCAT('%',Expression(
 1306                      None,self.REPLACE(second,('%','%%'))),'%'))
 1307              else:
 1308                  second = '%'+str(second).replace('%','%%')+'%'
 1309          elif first.type.startswith('list:'):
 1310              if isinstance(second,Expression):
 1311                  second = Expression(None,self.CONCAT(
 1312                      '%|',Expression(None,self.REPLACE(
 1313                          Expression(None,self.REPLACE(
 1314                              second,('%','%%'))),('|','||'))),'|%'))
 1315              else:
 1316                  second = '%|'+str(second).replace('%','%%')\
 1317                      .replace('|','||')+'|%'
 1318          op = case_sensitive and self.LIKE or self.ILIKE
 1319          return op(first,second)
1320
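For 'list:' fields the items are stored in one column separated by '|' (see REGEX_UNPACK above), so CONTAINS brackets the search term with the separator; for plain text it is an ordinary substring match:

>>> # contains('x') on a string field       ->  LIKE '%x%'
>>> # contains('x') on a list:string field  ->  LIKE '%|x|%'
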
 1321      def EQ(self, first, second=None):
 1322          if second is None:
 1323              return '(%s IS NULL)' % self.expand(first)
 1324          return '(%s = %s)' % (self.expand(first),
 1325                                self.expand(second, first.type))
 1326  
 1327      def NE(self, first, second=None):
 1328          if second is None:
 1329              return '(%s IS NOT NULL)' % self.expand(first)
 1330          return '(%s <> %s)' % (self.expand(first),
 1331                                 self.expand(second, first.type))
 1332  
 1333      def LT(self,first,second=None):
 1334          if second is None:
 1335              raise RuntimeError("Cannot compare %s < None" % first)
 1336          return '(%s < %s)' % (self.expand(first),
 1337                                self.expand(second,first.type))
 1338  
 1339      def LE(self,first,second=None):
 1340          if second is None:
 1341              raise RuntimeError("Cannot compare %s <= None" % first)
 1342          return '(%s <= %s)' % (self.expand(first),
 1343                                 self.expand(second,first.type))
 1344  
 1345      def GT(self,first,second=None):
 1346          if second is None:
 1347              raise RuntimeError("Cannot compare %s > None" % first)
 1348          return '(%s > %s)' % (self.expand(first),
 1349                                self.expand(second,first.type))
 1350  
 1351      def GE(self,first,second=None):
 1352          if second is None:
 1353              raise RuntimeError("Cannot compare %s >= None" % first)
 1354          return '(%s >= %s)' % (self.expand(first),
 1355                                 self.expand(second,first.type))
1356
 1357      def is_numerical_type(self, ftype):
 1358          return ftype in ('integer','boolean','double','bigint') or \
 1359              ftype.startswith('decimal')
 1360  
 1361      def REPLACE(self, first, (second, third)):
 1362          return 'REPLACE(%s,%s,%s)' % (self.expand(first,'string'),
 1363                                        self.expand(second,'string'),
 1364                                        self.expand(third,'string'))
 1365  
 1366      def CONCAT(self, *items):
 1367          return '(%s)' % ' || '.join(self.expand(x,'string') for x in items)
1368
 1369      def ADD(self, first, second):
 1370          if self.is_numerical_type(first.type):
 1371              return '(%s + %s)' % (self.expand(first),
 1372                                    self.expand(second, first.type))
 1373          else:
 1374              return self.CONCAT(first, second)
 1375  
 1376      def SUB(self, first, second):
 1377          return '(%s - %s)' % (self.expand(first),
 1378                                self.expand(second, first.type))
 1379  
 1380      def MUL(self, first, second):
 1381          return '(%s * %s)' % (self.expand(first),
 1382                                self.expand(second, first.type))
 1383  
 1384      def DIV(self, first, second):
 1385          return '(%s / %s)' % (self.expand(first),
 1386                                self.expand(second, first.type))
 1387  
 1388      def MOD(self, first, second):
 1389          return '(%s %% %s)' % (self.expand(first),
 1390                                 self.expand(second, first.type))
1391
 1392      def AS(self, first, second):
 1393          return '%s AS %s' % (self.expand(first), second)
 1394  
 1395      def ON(self, first, second):
 1396          if use_common_filters(second):
 1397              second = self.common_filter(second,[first._tablename])
 1398          return '%s ON %s' % (self.expand(first), self.expand(second))
 1399  
 1400      def INVERT(self, first):
 1401          return '%s DESC' % self.expand(first)
 1402  
 1403      def COMMA(self, first, second):
 1404          return '%s, %s' % (self.expand(first), self.expand(second))
 1405  
 1406      def CAST(self, first, second):
 1407          return 'CAST(%s AS %s)' % (first, second)
1408
 1409      def expand(self, expression, field_type=None):
 1410          if isinstance(expression, Field):
 1411              out = '%s.%s' % (expression.table._tablename, expression.name)
 1412              if field_type == 'string' and not expression.type in (
 1413                  'string','text','json','password'):
 1414                  out = self.CAST(out, self.types['text'])
 1415              return out
 1416          elif isinstance(expression, (Expression, Query)):
 1417              first = expression.first
 1418              second = expression.second
 1419              op = expression.op
 1420              optional_args = expression.optional_args or {}
 1421              if not second is None:
 1422                  out = op(first, second, **optional_args)
 1423              elif not first is None:
 1424                  out = op(first,**optional_args)
 1425              elif isinstance(op, str):
 1426                  if op.endswith(';'):
 1427                      op=op[:-1]
 1428                  out = '(%s)' % op
 1429              else:
 1430                  out = op()
 1431              return out
 1432          elif field_type:
 1433              return str(self.represent(expression,field_type))
 1434          elif isinstance(expression,(list,tuple)):
 1435              return ','.join(self.represent(item,field_type) \
 1436                                  for item in expression)
 1437          elif isinstance(expression, bool):
 1438              return '1' if expression else '0'
 1439          else:
 1440              return str(expression)
1441
 1442      def table_alias(self,name):
 1443          return str(name if isinstance(name,Table) else self.db[name])
 1444  
 1445      def alias(self, table, alias):
 1446          """
 1447          Given a table object, makes a new table object
 1448          with alias name.
 1449          """
 1450          other = copy.copy(table)
 1451          other['_ot'] = other._ot or other._tablename
 1452          other['ALL'] = SQLALL(other)
 1453          other['_tablename'] = alias
 1454          for fieldname in other.fields:
 1455              other[fieldname] = copy.copy(other[fieldname])
 1456              other[fieldname]._tablename = alias
 1457              other[fieldname].tablename = alias
 1458              other[fieldname].table = other
 1459          table._db[alias] = other
 1460          return other
1461
 1462      def _truncate(self, table, mode=''):
 1463          tablename = table._tablename
 1464          return ['TRUNCATE TABLE %s %s;' % (tablename, mode or '')]
 1465  
 1466      def truncate(self, table, mode= ' '):
 1467          # Prepare functions "write_to_logfile" and "close_logfile"
 1468          try:
 1469              queries = table._db._adapter._truncate(table, mode)
 1470              for query in queries:
 1471                  self.log(query + '\n', table)
 1472                  self.execute(query)
 1473              table._db.commit()
 1474              self.log('success!\n', table)
 1475          finally:
 1476              pass
1477
 1478      def _update(self, tablename, query, fields):
 1479          if query:
 1480              if use_common_filters(query):
 1481                  query = self.common_filter(query, [tablename])
 1482              sql_w = ' WHERE ' + self.expand(query)
 1483          else:
 1484              sql_w = ''
 1485          sql_v = ','.join(['%s=%s' % (field.name,
 1486                                       self.expand(value, field.type)) \
 1487                                for (field, value) in fields])
 1488          tablename = "%s" % self.db[tablename]
 1489          return 'UPDATE %s SET %s%s;' % (tablename, sql_v, sql_w)
 1490  
 1491      def update(self, tablename, query, fields):
 1492          sql = self._update(tablename, query, fields)
 1493          try:
 1494              self.execute(sql)
 1495          except Exception:
 1496              e = sys.exc_info()[1]
 1497              table = self.db[tablename]
 1498              if hasattr(table,'_on_update_error'):
 1499                  return table._on_update_error(table,query,fields,e)
 1500              raise e
 1501          try:
 1502              return self.cursor.rowcount
 1503          except:
 1504              return None
1505
 1506      def _delete(self, tablename, query):
 1507          if query:
 1508              if use_common_filters(query):
 1509                  query = self.common_filter(query, [tablename])
 1510              sql_w = ' WHERE ' + self.expand(query)
 1511          else:
 1512              sql_w = ''
 1513          return 'DELETE FROM %s%s;' % (tablename, sql_w)
 1514  
 1515      def delete(self, tablename, query):
 1516          sql = self._delete(tablename, query)
 1517          ### special code to handle CASCADE in SQLite & SpatiaLite
 1518          db = self.db
 1519          table = db[tablename]
 1520          if self.dbengine in ('sqlite', 'spatialite') and table._referenced_by:
 1521              deleted = [x[table._id.name] for x in db(query).select(table._id)]
 1522          ### end special code to handle CASCADE in SQLite & SpatiaLite
 1523          self.execute(sql)
 1524          try:
 1525              counter = self.cursor.rowcount
 1526          except:
 1527              counter = None
 1528          ### special code to handle CASCADE in SQLite & SpatiaLite
 1529          if self.dbengine in ('sqlite', 'spatialite') and counter:
 1530              for field in table._referenced_by:
 1531                  if field.type=='reference '+table._tablename \
 1532                          and field.ondelete=='CASCADE':
 1533                      db(field.belongs(deleted)).delete()
 1534          ### end special code to handle CASCADE in SQLite & SpatiaLite
 1535          return counter
1536
 1537      def get_table(self, query):
 1538          tablenames = self.tables(query)
 1539          if len(tablenames)==1:
 1540              return tablenames[0]
 1541          elif len(tablenames)<1:
 1542              raise RuntimeError("No table selected")
 1543          else:
 1544              raise RuntimeError("Too many tables selected")
1545
 1546      def expand_all(self, fields, tablenames):
 1547          db = self.db
 1548          new_fields = []
 1549          append = new_fields.append
 1550          for item in fields:
 1551              if isinstance(item,SQLALL):
 1552                  new_fields += item._table
 1553              elif isinstance(item,str):
 1554                  if REGEX_TABLE_DOT_FIELD.match(item):
 1555                      tablename,fieldname = item.split('.')
 1556                      append(db[tablename][fieldname])
 1557                  else:
 1558                      append(Expression(db,lambda item=item:item))
 1559              else:
 1560                  append(item)
 1561          # ## if no fields specified take them all from the requested tables
 1562          if not new_fields:
 1563              for table in tablenames:
 1564                  for field in db[table]:
 1565                      append(field)
 1566          return new_fields
1567
 1568      def _select(self, query, fields, attributes):
 1569          tables = self.tables
 1570          for key in set(attributes.keys())-SELECT_ARGS:
 1571              raise SyntaxError('invalid select attribute: %s' % key)
 1572          args_get = attributes.get
 1573          tablenames = tables(query)
 1574          tablenames_for_common_filters = tablenames
 1575          for field in fields:
 1576              if isinstance(field, basestring) \
 1577                      and REGEX_TABLE_DOT_FIELD.match(field):
 1578                  tn,fn = field.split('.')
 1579                  field = self.db[tn][fn]
 1580              for tablename in tables(field):
 1581                  if not tablename in tablenames:
 1582                      tablenames.append(tablename)
 1583  
 1584          if len(tablenames) < 1:
 1585              raise SyntaxError('Set: no tables selected')
 1586          self._colnames = map(self.expand, fields)
 1587          def geoexpand(field):
 1588              if isinstance(field.type,str) and field.type.startswith('geometry'):
 1589                  field = field.st_astext()
 1590              return self.expand(field)
 1591          sql_f = ', '.join(map(geoexpand, fields))
 1592          sql_o = ''
 1593          sql_s = ''
 1594          left = args_get('left', False)
 1595          inner_join = args_get('join', False)
 1596          distinct = args_get('distinct', False)
 1597          groupby = args_get('groupby', False)
 1598          orderby = args_get('orderby', False)
 1599          having = args_get('having', False)
 1600          limitby = args_get('limitby', False)
 1601          orderby_on_limitby = args_get('orderby_on_limitby', True)
 1602          for_update = args_get('for_update', False)
 1603          if self.can_select_for_update is False and for_update is True:
 1604              raise SyntaxError('invalid select attribute: for_update')
 1605          if distinct is True:
 1606              sql_s += 'DISTINCT'
 1607          elif distinct:
 1608              sql_s += 'DISTINCT ON (%s)' % distinct
 1609          if inner_join:
 1610              icommand = self.JOIN()
 1611              if not isinstance(inner_join, (tuple, list)):
 1612                  inner_join = [inner_join]
 1613              ijoint = [t._tablename for t in inner_join
 1614                        if not isinstance(t,Expression)]
 1615              ijoinon = [t for t in inner_join if isinstance(t, Expression)]
 1616              itables_to_merge={} #issue 490
 1617              [itables_to_merge.update(
 1618                      dict.fromkeys(tables(t))) for t in ijoinon]
 1619              ijoinont = [t.first._tablename for t in ijoinon]
 1620              [itables_to_merge.pop(t) for t in ijoinont
 1621               if t in itables_to_merge] #issue 490
 1622              iimportant_tablenames = ijoint + ijoinont + itables_to_merge.keys()
 1623              iexcluded = [t for t in tablenames
 1624                           if not t in iimportant_tablenames]
 1625          if left:
 1626              join = attributes['left']
 1627              command = self.LEFT_JOIN()
 1628              if not isinstance(join, (tuple, list)):
 1629                  join = [join]
 1630              joint = [t._tablename for t in join
 1631                       if not isinstance(t, Expression)]
 1632              joinon = [t for t in join if isinstance(t, Expression)]
 1633              #patch join+left patch (solves problem with ordering in left joins)
 1634              tables_to_merge={}
 1635              [tables_to_merge.update(
 1636                      dict.fromkeys(tables(t))) for t in joinon]
 1637              joinont = [t.first._tablename for t in joinon]
 1638              [tables_to_merge.pop(t) for t in joinont if t in tables_to_merge]
 1639              tablenames_for_common_filters = [t for t in tablenames
 1640                                               if not t in joinont ]
 1641              important_tablenames = joint + joinont + tables_to_merge.keys()
 1642              excluded = [t for t in tablenames
 1643                          if not t in important_tablenames ]
 1644          else:
 1645              excluded = tablenames
 1646  
 1647          if use_common_filters(query):
 1648              query = self.common_filter(query,tablenames_for_common_filters)
 1649          sql_w = ' WHERE ' + self.expand(query) if query else ''
 1650  
 1651          if inner_join and not left:
 1652              sql_t = ', '.join([self.table_alias(t) for t in iexcluded + \
 1653                                     itables_to_merge.keys()])
 1654              for t in ijoinon:
 1655                  sql_t += ' %s %s' % (icommand, t)
 1656          elif not inner_join and left:
 1657              sql_t = ', '.join([self.table_alias(t) for t in excluded + \
 1658                                     tables_to_merge.keys()])
 1659              if joint:
 1660                  sql_t += ' %s %s' % (command,
 1661                                       ','.join([self.table_alias(t) for t in joint]))
 1662              for t in joinon:
 1663                  sql_t += ' %s %s' % (command, t)
 1664          elif inner_join and left:
 1665              all_tables_in_query = set(important_tablenames + \
 1666                                        iimportant_tablenames + \
 1667                                        tablenames)
 1668              tables_in_joinon = set(joinont + ijoinont)
 1669              tables_not_in_joinon = \
 1670                  all_tables_in_query.difference(tables_in_joinon)
 1671              sql_t = ','.join([self.table_alias(t) for t in tables_not_in_joinon])
 1672              for t in ijoinon:
 1673                  sql_t += ' %s %s' % (icommand, t)
 1674              if joint:
 1675                  sql_t += ' %s %s' % (command,
 1676                                       ','.join([self.table_alias(t) for t in joint]))
 1677              for t in joinon:
 1678                  sql_t += ' %s %s' % (command, t)
 1679          else:
 1680              sql_t = ', '.join(self.table_alias(t) for t in tablenames)
 1681          if groupby:
 1682              if isinstance(groupby, (list, tuple)):
 1683                  groupby = xorify(groupby)
 1684              sql_o += ' GROUP BY %s' % self.expand(groupby)
 1685              if having:
 1686                  sql_o += ' HAVING %s' % attributes['having']
 1687          if orderby:
 1688              if isinstance(orderby, (list, tuple)):
 1689                  orderby = xorify(orderby)
 1690              if str(orderby) == '<random>':
 1691                  sql_o += ' ORDER BY %s' % self.RANDOM()
 1692              else:
 1693                  sql_o += ' ORDER BY %s' % self.expand(orderby)
 1694          if (limitby and not groupby and tablenames and orderby_on_limitby and not orderby):
 1695              sql_o += ' ORDER BY %s' % ', '.join(['%s.%s'%(t,x) for t in tablenames for x in (hasattr(self.db[t],'_primarykey') and self.db[t]._primarykey or [self.db[t]._id.name])])
 1696          # oracle does not support limitby
 1697          sql = self.select_limitby(sql_s, sql_f, sql_t, sql_w, sql_o, limitby)
 1698          if for_update and self.can_select_for_update is True:
 1699              sql = sql.rstrip(';') + ' FOR UPDATE;'
 1700          return sql
 1701  
1702 - def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
1703 if limitby: 1704 (lmin, lmax) = limitby 1705 sql_o += ' LIMIT %i OFFSET %i' % (lmax - lmin, lmin) 1706 return 'SELECT %s %s FROM %s%s%s;' % \ 1707 (sql_s, sql_f, sql_t, sql_w, sql_o)
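The default LIMIT/OFFSET translation above is easy to check in isolation; the snippet below is a standalone sketch (not part of the adapter) that mirrors its arithmetic:

    def sketch_select_limitby(sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        # limitby=(lmin, lmax) asks for rows lmin..lmax-1, so the window
        # size is lmax - lmin and the offset is lmin
        if limitby:
            (lmin, lmax) = limitby
            sql_o += ' LIMIT %i OFFSET %i' % (lmax - lmin, lmin)
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    >>> sketch_select_limitby('', 'person.name', 'person', '', '', (10, 30))
    'SELECT  person.name FROM person LIMIT 20 OFFSET 10;'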
1708
1709 - def _fetchall(self):
1710 return self.cursor.fetchall()
1711
1712 - def _select_aux(self,sql,fields,attributes):
1713 args_get = attributes.get 1714 cache = args_get('cache',None) 1715 if not cache: 1716 self.execute(sql) 1717 rows = self._fetchall() 1718 else: 1719 (cache_model, time_expire) = cache 1720 key = self.uri + '/' + sql + '/rows' 1721 if len(key)>200: key = hashlib_md5(key).hexdigest() 1722 def _select_aux2(): 1723 self.execute(sql) 1724 return self._fetchall()
1725 rows = cache_model(key,_select_aux2,time_expire) 1726 if isinstance(rows,tuple): 1727 rows = list(rows) 1728 limitby = args_get('limitby', None) or (0,) 1729 rows = self.rowslice(rows,limitby[0],None) 1730 processor = args_get('processor',self.parse) 1731 cacheable = args_get('cacheable',False) 1732 return processor(rows,fields,self._colnames,cacheable=cacheable) 1733
1734 - def select(self, query, fields, attributes):
1735 """ 1736 Always returns a Rows object, possibly empty. 1737 """ 1738 sql = self._select(query, fields, attributes) 1739 cache = attributes.get('cache', None) 1740 if cache and attributes.get('cacheable',False): 1741 del attributes['cache'] 1742 (cache_model, time_expire) = cache 1743 key = self.uri + '/' + sql 1744 if len(key)>200: key = hashlib_md5(key).hexdigest() 1745 args = (sql,fields,attributes) 1746 return cache_model( 1747 key, 1748 lambda self=self,args=args:self._select_aux(*args), 1749 time_expire) 1750 else: 1751 return self._select_aux(sql,fields,attributes)
1752
1753 - def _count(self, query, distinct=None):
1754 tablenames = self.tables(query) 1755 if query: 1756 if use_common_filters(query): 1757 query = self.common_filter(query, tablenames) 1758 sql_w = ' WHERE ' + self.expand(query) 1759 else: 1760 sql_w = '' 1761 sql_t = ','.join(self.table_alias(t) for t in tablenames) 1762 if distinct: 1763 if isinstance(distinct,(list, tuple)): 1764 distinct = xorify(distinct) 1765 sql_d = self.expand(distinct) 1766 return 'SELECT count(DISTINCT %s) FROM %s%s;' % \ 1767 (sql_d, sql_t, sql_w) 1768 return 'SELECT count(*) FROM %s%s;' % (sql_t, sql_w)
1769
1770 - def count(self, query, distinct=None):
1771 self.execute(self._count(query, distinct)) 1772 return self.cursor.fetchone()[0]
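A doctest-style illustration of the SQL built by _count, assuming the sqlite 'person' table from the module docstring:

    >>> print db._adapter._count(db.person.name=='James')
    SELECT count(*) FROM person WHERE (person.name = 'James');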
1773
1774 - def tables(self, *queries):
1775 tables = set() 1776 for query in queries: 1777 if isinstance(query, Field): 1778 tables.add(query.tablename) 1779 elif isinstance(query, (Expression, Query)): 1780 if not query.first is None: 1781 tables = tables.union(self.tables(query.first)) 1782 if not query.second is None: 1783 tables = tables.union(self.tables(query.second)) 1784 return list(tables)
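The walk above simply recurses through a query's .first/.second operands, collecting the tablename of every Field it meets; for example, with the 'person' table from the module docstring:

    >>> q = (db.person.name == 'James') & (db.person.id > 0)
    >>> db._adapter.tables(q)
    ['person']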
1785
1786 - def commit(self):
1787 if self.connection: 1788 return self.connection.commit()
1789
1790 - def rollback(self):
1791 if self.connection: 1792 return self.connection.rollback()
1793
1794 - def close_connection(self):
1795 if self.connection: 1796 r = self.connection.close() 1797 self.connection = None 1798 return r
1799
1800 - def distributed_transaction_begin(self, key):
1801 return
1802
1803 - def prepare(self, key):
1804 if self.connection: self.connection.prepare()
1805
1806 - def commit_prepared(self, key):
1807 if self.connection: self.connection.commit()
1808
1809 - def rollback_prepared(self, key):
1810 if self.connection: self.connection.rollback()
1811
1812 - def concat_add(self, tablename):
1813 return ', ADD '
1814
1815 - def constraint_name(self, table, fieldname):
1816 return '%s_%s__constraint' % (table,fieldname)
1817
1818 - def create_sequence_and_triggers(self, query, table, **args):
1819 self.execute(query)
1820
1821 - def log_execute(self, *a, **b):
1822 if not self.connection: return None 1823 command = a[0] 1824 if hasattr(self,'filter_sql_command'): 1825 command = self.filter_sql_command(command) 1826 if self.db._debug: 1827 LOGGER.debug('SQL: %s' % command) 1828 self.db._lastsql = command 1829 t0 = time.time() 1830 ret = self.cursor.execute(command, *a[1:], **b) 1831 self.db._timings.append((command,time.time()-t0)) 1832 del self.db._timings[:-TIMINGSSIZE] 1833 return ret
1834
1835 - def execute(self, *a, **b):
1836 return self.log_execute(*a, **b)
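Everything routed through log_execute leaves a trace on the DAL instance, which makes for a cheap profiler; a usage sketch (TIMINGSSIZE is the cap defined near the top of this module):

    rows = db(db.person).select()
    print db._lastsql                      # last SQL command executed
    for command, dt in db._timings:        # (command, seconds) pairs,
        print '%8.5fs %s' % (dt, command)  # trimmed to TIMINGSSIZE entries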
1837
1838 - def represent(self, obj, fieldtype):
1839 field_is_type = fieldtype.startswith 1840 if isinstance(obj, CALLABLETYPES): 1841 obj = obj() 1842 if isinstance(fieldtype, SQLCustomType): 1843 value = fieldtype.encoder(obj) 1844 if fieldtype.type in ('string','text', 'json'): 1845 return self.adapt(value) 1846 return value 1847 if isinstance(obj, (Expression, Field)): 1848 return str(obj) 1849 if field_is_type('list:'): 1850 if not obj: 1851 obj = [] 1852 elif not isinstance(obj, (list, tuple)): 1853 obj = [obj] 1854 if field_is_type('list:string'): 1855 obj = map(str,obj) 1856 else: 1857 obj = map(int,[o for o in obj if o != '']) 1858 # we don't want to bar_encode json objects 1859 if isinstance(obj, (list, tuple)) and (not fieldtype == "json"): 1860 obj = bar_encode(obj) 1861 if obj is None: 1862 return 'NULL' 1863 if obj == '' and not fieldtype[:2] in ['st', 'te', 'js', 'pa', 'up']: 1864 return 'NULL' 1865 r = self.represent_exceptions(obj, fieldtype) 1866 if not r is None: 1867 return r 1868 if fieldtype == 'boolean': 1869 if obj and not str(obj)[:1].upper() in '0F': 1870 return self.smart_adapt(self.TRUE) 1871 else: 1872 return self.smart_adapt(self.FALSE) 1873 if fieldtype == 'id' or fieldtype == 'integer': 1874 return str(long(obj)) 1875 if field_is_type('decimal'): 1876 return str(obj) 1877 elif field_is_type('reference'): # reference 1878 if fieldtype.find('.')>0: 1879 return repr(obj) 1880 elif isinstance(obj, (Row, Reference)): 1881 return str(obj['id']) 1882 return str(long(obj)) 1883 elif fieldtype == 'double': 1884 return repr(float(obj)) 1885 if isinstance(obj, unicode): 1886 obj = obj.encode(self.db_codec) 1887 if fieldtype == 'blob': 1888 obj = base64.b64encode(str(obj)) 1889 elif fieldtype == 'date': 1890 if isinstance(obj, (datetime.date, datetime.datetime)): 1891 obj = obj.isoformat()[:10] 1892 else: 1893 obj = str(obj) 1894 elif fieldtype == 'datetime': 1895 if isinstance(obj, datetime.datetime): 1896 obj = obj.isoformat(self.T_SEP)[:19] 1897 elif isinstance(obj, datetime.date): 1898 obj = obj.isoformat()[:10]+self.T_SEP+'00:00:00' 1899 else: 1900 obj = str(obj) 1901 elif fieldtype == 'time': 1902 if isinstance(obj, datetime.time): 1903 obj = obj.isoformat()[:10] 1904 else: 1905 obj = str(obj) 1906 elif fieldtype == 'json': 1907 if not self.native_json: 1908 if have_serializers: 1909 obj = serializers.json(obj) 1910 elif simplejson: 1911 obj = simplejson.dumps(obj) 1912 else: 1913 raise RuntimeError("missing simplejson") 1914 if not isinstance(obj,bytes): 1915 obj = bytes(obj) 1916 try: 1917 obj.decode(self.db_codec) 1918 except: 1919 obj = obj.decode('latin1').encode(self.db_codec) 1920 return self.adapt(obj)
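A few doctest-style examples of the conversions above, assuming a sqlite connection (whose TRUE/FALSE markers are 'T' and 'F'):

    >>> adapter = db._adapter
    >>> adapter.represent(None, 'string')
    'NULL'
    >>> adapter.represent(True, 'boolean')
    "'T'"
    >>> adapter.represent([1, 2, 3], 'list:integer')   # bar-encoded
    "'|1|2|3|'"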
1921
1922 - def represent_exceptions(self, obj, fieldtype):
1923 return None
1924
1925 - def lastrowid(self, table):
1926 return None
1927
1928 - def rowslice(self, rows, minimum=0, maximum=None):
1929 """ 1930 By default this function does nothing; 1931 overload when db does not do slicing. 1932 """ 1933 return rows
1934
1935 - def parse_value(self, value, field_type, blob_decode=True):
1936 if field_type != 'blob' and isinstance(value, str): 1937 try: 1938 value = value.decode(self.db._db_codec) 1939 except Exception: 1940 pass 1941 if isinstance(value, unicode): 1942 value = value.encode('utf-8') 1943 if isinstance(field_type, SQLCustomType): 1944 value = field_type.decoder(value) 1945 if not isinstance(field_type, str) or value is None: 1946 return value 1947 elif field_type in ('string', 'text', 'password', 'upload', 'dict'): 1948 return value 1949 elif field_type.startswith('geo'): 1950 return value 1951 elif field_type == 'blob' and not blob_decode: 1952 return value 1953 else: 1954 key = REGEX_TYPE.match(field_type).group(0) 1955 return self.parsemap[key](value,field_type)
1956
1957 - def parse_reference(self, value, field_type):
1958 referee = field_type[10:].strip() 1959 if not '.' in referee: 1960 value = Reference(value) 1961 value._table, value._record = self.db[referee], None 1962 return value
1963
1964 - def parse_boolean(self, value, field_type):
1965 return value == self.TRUE or str(value)[:1].lower() == 't'
1966
1967 - def parse_date(self, value, field_type):
1968 if isinstance(value, datetime.datetime): 1969 return value.date() 1970 if not isinstance(value, (datetime.date,datetime.datetime)): 1971 (y, m, d) = map(int, str(value)[:10].strip().split('-')) 1972 value = datetime.date(y, m, d) 1973 return value
1974
1975 - def parse_time(self, value, field_type):
1976 if not isinstance(value, datetime.time): 1977 time_items = map(int,str(value)[:8].strip().split(':')[:3]) 1978 if len(time_items) == 3: 1979 (h, mi, s) = time_items 1980 else: 1981 (h, mi, s) = time_items + [0] 1982 value = datetime.time(h, mi, s) 1983 return value
1984
1985 - def parse_datetime(self, value, field_type):
1986 if not isinstance(value, datetime.datetime): 1987 value = str(value) 1988 date_part,time_part,timezone = value[:10],value[11:19],value[19:] 1989 if '+' in timezone: 1990 ms,tz = timezone.split('+') 1991 h,m = tz.split(':') 1992 dt = datetime.timedelta(seconds=3600*int(h)+60*int(m)) 1993 elif '-' in timezone: 1994 ms,tz = timezone.split('-') 1995 h,m = tz.split(':') 1996 dt = -datetime.timedelta(seconds=3600*int(h)+60*int(m)) 1997 else: 1998 dt = None 1999 (y, m, d) = map(int,date_part.split('-')) 2000 time_parts = time_part and time_part.split(':')[:3] or (0,0,0) 2001 while len(time_parts)<3: time_parts.append(0) 2002 time_items = map(int,time_parts) 2003 (h, mi, s) = time_items 2004 value = datetime.datetime(y, m, d, h, mi, s) 2005 if dt: 2006 value = value + dt 2007 return value
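The parser above also folds a trailing UTC offset into the value; for example (adapter being any connected adapter instance):

    >>> adapter.parse_datetime('2013-05-01 12:00:00+02:00', 'datetime')
    datetime.datetime(2013, 5, 1, 14, 0)
    >>> adapter.parse_datetime('2013-05-01 12:00:00-01:30', 'datetime')
    datetime.datetime(2013, 5, 1, 10, 30)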
2008
2009 - def parse_blob(self, value, field_type):
2010 return base64.b64decode(str(value))
2011
2012 - def parse_decimal(self, value, field_type):
2013 decimals = int(field_type[8:-1].split(',')[-1]) 2014 if self.dbengine in ('sqlite', 'spatialite'): 2015 value = ('%.' + str(decimals) + 'f') % value 2016 if not isinstance(value, decimal.Decimal): 2017 value = decimal.Decimal(str(value)) 2018 return value
2019
2020 - def parse_list_integers(self, value, field_type):
2021 if not isinstance(self, NoSQLAdapter): 2022 value = bar_decode_integer(value) 2023 return value
2024
2025 - def parse_list_references(self, value, field_type):
2026 if not isinstance(self, NoSQLAdapter): 2027 value = bar_decode_integer(value) 2028 return [self.parse_reference(r, field_type[5:]) for r in value]
2029
2030 - def parse_list_strings(self, value, field_type):
2031 if not isinstance(self, NoSQLAdapter): 2032 value = bar_decode_string(value) 2033 return value
2034
2035 - def parse_id(self, value, field_type):
2036 return long(value)
2037
2038 - def parse_integer(self, value, field_type):
2039 return long(value)
2040
2041 - def parse_double(self, value, field_type):
2042 return float(value)
2043
2044 - def parse_json(self, value, field_type):
2045 if not self.native_json: 2046 if not isinstance(value, basestring): 2047 raise RuntimeError('json data not a string') 2048 if isinstance(value, unicode): 2049 value = value.encode('utf-8') 2050 if have_serializers: 2051 value = serializers.loads_json(value) 2052 elif simplejson: 2053 value = simplejson.loads(value) 2054 else: 2055 raise RuntimeError("missing simplejson") 2056 return value
2057
2058 - def build_parsemap(self):
2059 self.parsemap = { 2060 'id':self.parse_id, 2061 'integer':self.parse_integer, 2062 'bigint':self.parse_integer, 2063 'float':self.parse_double, 2064 'double':self.parse_double, 2065 'reference':self.parse_reference, 2066 'boolean':self.parse_boolean, 2067 'date':self.parse_date, 2068 'time':self.parse_time, 2069 'datetime':self.parse_datetime, 2070 'blob':self.parse_blob, 2071 'decimal':self.parse_decimal, 2072 'json':self.parse_json, 2073 'list:integer':self.parse_list_integers, 2074 'list:reference':self.parse_list_references, 2075 'list:string':self.parse_list_strings, 2076 }
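parse_value (above) picks the right entry from this map by matching the leading type word of the field type string; the pattern below is an illustrative stand-in for the module's REGEX_TYPE:

    >>> import re
    >>> REGEX_TYPE_SKETCH = re.compile(r'^[\w:]+')
    >>> REGEX_TYPE_SKETCH.match('decimal(10,2)').group(0)
    'decimal'
    >>> REGEX_TYPE_SKETCH.match('reference person').group(0)
    'reference'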
2077
2078 - def parse(self, rows, fields, colnames, blob_decode=True, 2079 cacheable = False):
2080 db = self.db 2081 virtualtables = [] 2082 new_rows = [] 2083 tmps = [] 2084 for colname in colnames: 2085 if not REGEX_TABLE_DOT_FIELD.match(colname): 2086 tmps.append(None) 2087 else: 2088 (tablename, _the_sep_, fieldname) = colname.partition('.') 2089 table = db[tablename] 2090 field = table[fieldname] 2091 ft = field.type 2092 tmps.append((tablename,fieldname,table,field,ft)) 2093 for (i,row) in enumerate(rows): 2094 new_row = Row() 2095 for (j,colname) in enumerate(colnames): 2096 value = row[j] 2097 tmp = tmps[j] 2098 if tmp: 2099 (tablename,fieldname,table,field,ft) = tmp 2100 if tablename in new_row: 2101 colset = new_row[tablename] 2102 else: 2103 colset = new_row[tablename] = Row() 2104 if tablename not in virtualtables: 2105 virtualtables.append(tablename) 2106 value = self.parse_value(value,ft,blob_decode) 2107 if field.filter_out: 2108 value = field.filter_out(value) 2109 colset[fieldname] = value 2110 2111 # for backward compatibility 2112 if ft=='id' and fieldname!='id' and \ 2113 not 'id' in table.fields: 2114 colset['id'] = value 2115 2116 if ft == 'id' and not cacheable: 2117 # temporary hack to deal with 2118 # GoogleDatastoreAdapter 2119 # references 2120 if isinstance(self, GoogleDatastoreAdapter): 2121 id = value.key().id_or_name() 2122 colset[fieldname] = id 2123 colset.gae_item = value 2124 else: 2125 id = value 2126 colset.update_record = RecordUpdater(colset,table,id) 2127 colset.delete_record = RecordDeleter(table,id) 2128 if table._db._lazy_tables: 2129 colset['__get_lazy_reference__'] = LazyReferenceGetter(table, id) 2130 for rfield in table._referenced_by: 2131 referee_link = db._referee_name and \ 2132 db._referee_name % dict( 2133 table=rfield.tablename,field=rfield.name) 2134 if referee_link and not referee_link in colset: 2135 colset[referee_link] = LazySet(rfield,id) 2136 else: 2137 if not '_extra' in new_row: 2138 new_row['_extra'] = Row() 2139 new_row['_extra'][colname] = \ 2140 self.parse_value(value, 2141 fields[j].type,blob_decode) 2142 new_column_name = \ 2143 REGEX_SELECT_AS_PARSER.search(colname) 2144 if not new_column_name is None: 2145 column_name = new_column_name.groups(0) 2146 setattr(new_row,column_name[0],value) 2147 new_rows.append(new_row) 2148 rowsobj = Rows(db, new_rows, colnames, rawrows=rows) 2149 2150 2151 for tablename in virtualtables: 2152 table = db[tablename] 2153 fields_virtual = [(f,v) for (f,v) in table.iteritems() 2154 if isinstance(v,FieldVirtual)] 2155 fields_lazy = [(f,v) for (f,v) in table.iteritems() 2156 if isinstance(v,FieldMethod)] 2157 if fields_virtual or fields_lazy: 2158 for row in rowsobj.records: 2159 box = row[tablename] 2160 for f,v in fields_virtual: 2161 try: 2162 box[f] = v.f(row) 2163 except AttributeError: 2164 pass # not enough fields to define virtual field 2165 for f,v in fields_lazy: 2166 try: 2167 box[f] = (v.handler or VirtualCommand)(v.f,row) 2168 except AttributeError: 2169 pass # not enough fields to define virtual field 2170 2171 ### old style virtual fields 2172 for item in table.virtualfields: 2173 try: 2174 rowsobj = rowsobj.setvirtualfields(**{tablename:item}) 2175 except (KeyError, AttributeError): 2176 # to avoid breaking virtualfields when partial select 2177 pass 2178 return rowsobj
2179
2180 - def common_filter(self, query, tablenames):
2181 tenant_fieldname = self.db._request_tenant 2182 2183 for tablename in tablenames: 2184 table = self.db[tablename] 2185 2186 # deal with user provided filters 2187 if table._common_filter != None: 2188 query = query & table._common_filter(query) 2189 2190 # deal with multi_tenant filters 2191 if tenant_fieldname in table: 2192 default = table[tenant_fieldname].default 2193 if not default is None: 2194 newquery = table[tenant_fieldname] == default 2195 if query is None: 2196 query = newquery 2197 else: 2198 query = query & newquery 2199 return query
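A usage sketch of the two implicit filters merged above (table and field names are illustrative):

    db.define_table('doc',
        Field('body', 'text'),
        Field('request_tenant', default='acme'),  # multi-tenant column
        common_filter=lambda query: db.doc.body != '')
    # every db(...).select()/update()/delete() touching 'doc' is now
    # implicitly AND-ed with doc.body != '' and doc.request_tenant == 'acme'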
2200
2201 - def CASE(self,query,t,f):
2202 def represent(x): 2203 types = {type(True):'boolean',type(0):'integer',type(1.0):'double'} 2204 if x is None: return 'NULL' 2205 elif isinstance(x,Expression): return str(x) 2206 else: return self.represent(x,types.get(type(x),'string'))
2207 return Expression(self.db,'CASE WHEN %s THEN %s ELSE %s END' % \ 2208 (self.expand(query),represent(t),represent(f))) 2209
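CASE returns an Expression that can be selected like a field; a usage sketch with the 'person' table from the module docstring:

    >>> flag = db._adapter.CASE(db.person.name == 'James', 1, 0)
    >>> # expands to: CASE WHEN (person.name = 'James') THEN 1 ELSE 0 END
    >>> row = db(db.person).select(flag).first()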
2210 ################################################################################### 2211 # List of all the available adapters; they all extend BaseAdapter. 2212 ################################################################################### 2213 2214 -class SQLiteAdapter(BaseAdapter):
2215 drivers = ('sqlite2','sqlite3') 2216 2217 can_select_for_update = None # support ourselves with BEGIN TRANSACTION 2218
2219 - def EXTRACT(self,field,what):
2220 return "web2py_extract('%s',%s)" % (what, self.expand(field))
2221 2222 @staticmethod
2223 - def web2py_extract(lookup, s):
2224 table = { 2225 'year': (0, 4), 2226 'month': (5, 7), 2227 'day': (8, 10), 2228 'hour': (11, 13), 2229 'minute': (14, 16), 2230 'second': (17, 19), 2231 } 2232 try: 2233 if lookup != 'epoch': 2234 (i, j) = table[lookup] 2235 return int(s[i:j]) 2236 else: 2237 return time.mktime(datetime.datetime.strptime(s, '%Y-%m-%d %H:%M:%S').timetuple()) 2238 except: 2239 return None
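Since web2py_extract works on the ISO text sqlite stores, it can be exercised directly (unknown lookups fall through to None):

    >>> SQLiteAdapter.web2py_extract('month', '2013-05-01 12:30:00')
    5
    >>> SQLiteAdapter.web2py_extract('hour', '2013-05-01 12:30:00')
    12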
2240 2241 @staticmethod
2242 - def web2py_regexp(expression, item):
2243 return re.compile(expression).search(item) is not None
2244
2245 - def __init__(self, db, uri, pool_size=0, folder=None, db_codec ='UTF-8', 2246 credential_decoder=IDENTITY, driver_args={}, 2247 adapter_args={}, do_connect=True, after_connection=None):
2248 self.db = db 2249 self.dbengine = "sqlite" 2250 self.uri = uri 2251 if do_connect: self.find_driver(adapter_args) 2252 self.pool_size = 0 2253 self.folder = folder 2254 self.db_codec = db_codec 2255 self._after_connection = after_connection 2256 self.find_or_make_work_folder() 2257 path_encoding = sys.getfilesystemencoding() \ 2258 or locale.getdefaultlocale()[1] or 'utf8' 2259 if uri.startswith('sqlite:memory'): 2260 self.dbpath = ':memory:' 2261 else: 2262 self.dbpath = uri.split('://',1)[1] 2263 if self.dbpath[0] != '/': 2264 if PYTHON_VERSION == 2: 2265 self.dbpath = pjoin( 2266 self.folder.decode(path_encoding).encode('utf8'), self.dbpath) 2267 else: 2268 self.dbpath = pjoin(self.folder, self.dbpath) 2269 if not 'check_same_thread' in driver_args: 2270 driver_args['check_same_thread'] = False 2271 if not 'detect_types' in driver_args and do_connect: 2272 driver_args['detect_types'] = self.driver.PARSE_DECLTYPES 2273 def connector(dbpath=self.dbpath, driver_args=driver_args): 2274 return self.driver.Connection(dbpath, **driver_args)
2275 self.connector = connector 2276 if do_connect: self.reconnect()
2277
2278 - def after_connection(self):
2279 self.connection.create_function('web2py_extract', 2, 2280 SQLiteAdapter.web2py_extract) 2281 self.connection.create_function("REGEXP", 2, 2282 SQLiteAdapter.web2py_regexp)
2283
2284 - def _truncate(self, table, mode=''):
2285 tablename = table._tablename 2286 return ['DELETE FROM %s;' % tablename, 2287 "DELETE FROM sqlite_sequence WHERE name='%s';" % tablename]
2288
2289 - def lastrowid(self, table):
2290 return self.cursor.lastrowid
2291
2292 - def REGEXP(self,first,second):
2293 return '(%s REGEXP %s)' % (self.expand(first), 2294 self.expand(second,'string'))
2295
2296 - def select(self, query, fields, attributes):
2297 """ 2298 Simulate SELECT ... FOR UPDATE with BEGIN IMMEDIATE TRANSACTION. 2299 Note that the entire database, rather than one record, is locked 2300 (it will be locked eventually anyway by the following UPDATE). 2301 """ 2302 if attributes.get('for_update', False) and not 'cache' in attributes: 2303 self.execute('BEGIN IMMEDIATE TRANSACTION;') 2304 return super(SQLiteAdapter, self).select(query, fields, attributes)
2305
2306 -class SpatiaLiteAdapter(SQLiteAdapter):
2307 drivers = ('sqlite3','sqlite2') 2308 2309 types = copy.copy(BaseAdapter.types) 2310 types.update(geometry='GEOMETRY') 2311
2312 - def __init__(self, db, uri, pool_size=0, folder=None, db_codec ='UTF-8', 2313 credential_decoder=IDENTITY, driver_args={}, 2314 adapter_args={}, do_connect=True, srid=4326, after_connection=None):
2315 self.db = db 2316 self.dbengine = "spatialite" 2317 self.uri = uri 2318 if do_connect: self.find_driver(adapter_args) 2319 self.pool_size = 0 2320 self.folder = folder 2321 self.db_codec = db_codec 2322 self._after_connection = after_connection 2323 self.find_or_make_work_folder() 2324 self.srid = srid 2325 path_encoding = sys.getfilesystemencoding() \ 2326 or locale.getdefaultlocale()[1] or 'utf8' 2327 if uri.startswith('spatialite:memory'): 2328 self.dbpath = ':memory:' 2329 else: 2330 self.dbpath = uri.split('://',1)[1] 2331 if self.dbpath[0] != '/': 2332 self.dbpath = pjoin( 2333 self.folder.decode(path_encoding).encode('utf8'), self.dbpath) 2334 if not 'check_same_thread' in driver_args: 2335 driver_args['check_same_thread'] = False 2336 if not 'detect_types' in driver_args and do_connect: 2337 driver_args['detect_types'] = self.driver.PARSE_DECLTYPES 2338 def connector(dbpath=self.dbpath, driver_args=driver_args): 2339 return self.driver.Connection(dbpath, **driver_args)
2340 self.connector = connector 2341 if do_connect: self.reconnect()
2342
2343 - def after_connection(self):
2344 self.connection.enable_load_extension(True) 2345 # for Windows, rename libspatialite-2.dll to libspatialite.dll 2346 # Linux uses libspatialite.so 2347 # Mac OS X uses libspatialite.dylib 2348 libspatialite = SPATIALLIBS[platform.system()] 2349 self.execute(r'SELECT load_extension("%s");' % libspatialite) 2350 2351 self.connection.create_function('web2py_extract', 2, 2352 SQLiteAdapter.web2py_extract) 2353 self.connection.create_function("REGEXP", 2, 2354 SQLiteAdapter.web2py_regexp)
2355 2356 # GIS functions 2357
2358 - def ST_ASGEOJSON(self, first, second):
2359 return 'AsGeoJSON(%s,%s,%s)' %(self.expand(first), 2360 second['precision'], second['options'])
2361
2362 - def ST_ASTEXT(self, first):
2363 return 'AsText(%s)' %(self.expand(first))
2364
2365 - def ST_CONTAINS(self, first, second):
2366 return 'Contains(%s,%s)' %(self.expand(first), 2367 self.expand(second, first.type))
2368
2369 - def ST_DISTANCE(self, first, second):
2370 return 'Distance(%s,%s)' %(self.expand(first), 2371 self.expand(second, first.type))
2372
2373 - def ST_EQUALS(self, first, second):
2374 return 'Equals(%s,%s)' %(self.expand(first), 2375 self.expand(second, first.type))
2376
2377 - def ST_INTERSECTS(self, first, second):
2378 return 'Intersects(%s,%s)' %(self.expand(first), 2379 self.expand(second, first.type))
2380
2381 - def ST_OVERLAPS(self, first, second):
2382 return 'Overlaps(%s,%s)' %(self.expand(first), 2383 self.expand(second, first.type))
2384
2385 - def ST_SIMPLIFY(self, first, second):
2386 return 'Simplify(%s,%s)' %(self.expand(first), 2387 self.expand(second, 'double'))
2388
2389 - def ST_TOUCHES(self, first, second):
2390 return 'Touches(%s,%s)' %(self.expand(first), 2391 self.expand(second, first.type))
2392
2393 - def ST_WITHIN(self, first, second):
2394 return 'Within(%s,%s)' %(self.expand(first), 2395 self.expand(second, first.type))
2396
2397 - def represent(self, obj, fieldtype):
2398 field_is_type = fieldtype.startswith 2399 if field_is_type('geo'): 2400 srid = 4326 # Spatialite default srid for geometry 2401 geotype, parms = fieldtype[:-1].split('(') 2402 parms = parms.split(',') 2403 if len(parms) >= 2: 2404 schema, srid = parms[:2] 2405 # if field_is_type('geometry'): 2406 value = "ST_GeomFromText('%s',%s)" %(obj, srid) 2407 # elif field_is_type('geography'): 2408 # value = "ST_GeogFromText('SRID=%s;%s')" %(srid, obj) 2409 # else: 2410 # raise SyntaxError, 'Invalid field type %s' %fieldtype 2411 return value 2412 return BaseAdapter.represent(self, obj, fieldtype)
2413
2414 2415 -class JDBCSQLiteAdapter(SQLiteAdapter):
2416 drivers = ('zxJDBC_sqlite',) 2417
2418 - def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8', 2419 credential_decoder=IDENTITY, driver_args={}, 2420 adapter_args={}, do_connect=True, after_connection=None):
2421 self.db = db 2422 self.dbengine = "sqlite" 2423 self.uri = uri 2424 if do_connect: self.find_driver(adapter_args) 2425 self.pool_size = pool_size 2426 self.folder = folder 2427 self.db_codec = db_codec 2428 self._after_connection = after_connection 2429 self.find_or_make_work_folder() 2430 path_encoding = sys.getfilesystemencoding() \ 2431 or locale.getdefaultlocale()[1] or 'utf8' 2432 if uri.startswith('sqlite:memory'): 2433 self.dbpath = ':memory:' 2434 else: 2435 self.dbpath = uri.split('://',1)[1] 2436 if self.dbpath[0] != '/': 2437 self.dbpath = pjoin( 2438 self.folder.decode(path_encoding).encode('utf8'), self.dbpath) 2439 def connector(dbpath=self.dbpath,driver_args=driver_args): 2440 return self.driver.connect( 2441 self.driver.getConnection('jdbc:sqlite:'+dbpath), 2442 **driver_args)
2443 self.connector = connector 2444 if do_connect: self.reconnect()
2445
2446 - def after_connection(self):
2447 # FIXME http://www.zentus.com/sqlitejdbc/custom_functions.html for UDFs 2448 self.connection.create_function('web2py_extract', 2, 2449 SQLiteAdapter.web2py_extract)
2450
2451 - def execute(self, a):
2452 return self.log_execute(a)
2453
2454 2455 -class MySQLAdapter(BaseAdapter):
2456 drivers = ('MySQLdb','pymysql', 'mysqlconnector') 2457 2458 commit_on_alter_table = True 2459 support_distributed_transaction = True 2460 types = { 2461 'boolean': 'CHAR(1)', 2462 'string': 'VARCHAR(%(length)s)', 2463 'text': 'LONGTEXT', 2464 'json': 'LONGTEXT', 2465 'password': 'VARCHAR(%(length)s)', 2466 'blob': 'LONGBLOB', 2467 'upload': 'VARCHAR(%(length)s)', 2468 'integer': 'INT', 2469 'bigint': 'BIGINT', 2470 'float': 'FLOAT', 2471 'double': 'DOUBLE', 2472 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', 2473 'date': 'DATE', 2474 'time': 'TIME', 2475 'datetime': 'DATETIME', 2476 'id': 'INT AUTO_INCREMENT NOT NULL', 2477 'reference': 'INT, INDEX %(index_name)s (%(field_name)s), FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 2478 'list:integer': 'LONGTEXT', 2479 'list:string': 'LONGTEXT', 2480 'list:reference': 'LONGTEXT', 2481 'big-id': 'BIGINT AUTO_INCREMENT NOT NULL', 2482 'big-reference': 'BIGINT, INDEX %(index_name)s (%(field_name)s), FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 2483 } 2484 2485 QUOTE_TEMPLATE = "`%s`" 2486
2487 - def varquote(self,name):
2488 return varquote_aux(name,'`%s`')
2489
2490 - def RANDOM(self):
2491 return 'RAND()'
2492
2493 - def SUBSTRING(self,field,parameters):
2494 return 'SUBSTRING(%s,%s,%s)' % (self.expand(field), 2495 parameters[0], parameters[1])
2496
2497 - def EPOCH(self, first):
2498 return "UNIX_TIMESTAMP(%s)" % self.expand(first)
2499
2500 - def CONCAT(self, *items):
2501 return 'CONCAT(%s)' % ','.join(self.expand(x,'string') for x in items)
2502
2503 - def REGEXP(self,first,second):
2504 return '(%s REGEXP %s)' % (self.expand(first), 2505 self.expand(second,'string'))
2506
2507 - def _drop(self,table,mode):
2508 # this breaks referential integrity, but without it MySQL will not drop the table 2509 return ['SET FOREIGN_KEY_CHECKS=0;','DROP TABLE %s;' % table, 2510 'SET FOREIGN_KEY_CHECKS=1;']
2511
2512 - def _insert_empty(self, table):
2513 return 'INSERT INTO %s VALUES (DEFAULT);' % table
2514
2515 - def distributed_transaction_begin(self,key):
2516 self.execute('XA START;')
2517
2518 - def prepare(self,key):
2519 self.execute("XA END;") 2520 self.execute("XA PREPARE;")
2521
2522 - def commit_prepared(self,key):
2523 self.execute("XA COMMIT;")
2524
2525 - def rollback_prepared(self,key):
2526 self.execute("XA ROLLBACK;")
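Together these methods drive MySQL's XA two-phase commit, as orchestrated by DAL.distributed_transaction_commit elsewhere in this module; the wire sequence is:

    XA START;     -- distributed_transaction_begin
    ... SQL ...
    XA END;       -- prepare, phase one
    XA PREPARE;
    XA COMMIT;    -- commit_prepared, phase two
    -- or, on any failure:
    XA ROLLBACK;  -- rollback_prepared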
2527 2528 REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^?]+)(\?set_encoding=(?P<charset>\w+))?$') 2529
2530 - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', 2531 credential_decoder=IDENTITY, driver_args={}, 2532 adapter_args={}, do_connect=True, after_connection=None):
2533 self.db = db 2534 self.dbengine = "mysql" 2535 self.uri = uri 2536 if do_connect: self.find_driver(adapter_args,uri) 2537 self.pool_size = pool_size 2538 self.folder = folder 2539 self.db_codec = db_codec 2540 self._after_connection = after_connection 2541 self.find_or_make_work_folder() 2542 ruri = uri.split('://',1)[1] 2543 m = self.REGEX_URI.match(ruri) 2544 if not m: 2545 raise SyntaxError( 2546 "Invalid URI string in DAL: %s" % self.uri) 2547 user = credential_decoder(m.group('user')) 2548 if not user: 2549 raise SyntaxError('User required') 2550 password = credential_decoder(m.group('password')) 2551 if not password: 2552 password = '' 2553 host = m.group('host') 2554 if not host: 2555 raise SyntaxError('Host name required') 2556 db = m.group('db') 2557 if not db: 2558 raise SyntaxError('Database name required') 2559 port = int(m.group('port') or '3306') 2560 charset = m.group('charset') or 'utf8' 2561 driver_args.update(db=db, 2562 user=credential_decoder(user), 2563 passwd=credential_decoder(password), 2564 host=host, 2565 port=port, 2566 charset=charset) 2567 2568 2569 def connector(driver_args=driver_args): 2570 return self.driver.connect(**driver_args)
2571 self.connector = connector 2572 if do_connect: self.reconnect()
2573
2574 - def after_connection(self):
2575 self.execute('SET FOREIGN_KEY_CHECKS=1;') 2576 self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';")
2577
2578 - def lastrowid(self,table):
2579 self.execute('select last_insert_id();') 2580 return int(self.cursor.fetchone()[0])
2581
2582 2583 -class PostgreSQLAdapter(BaseAdapter):
2584 drivers = ('psycopg2','pg8000') 2585 2586 support_distributed_transaction = True 2587 types = { 2588 'boolean': 'CHAR(1)', 2589 'string': 'VARCHAR(%(length)s)', 2590 'text': 'TEXT', 2591 'json': 'TEXT', 2592 'password': 'VARCHAR(%(length)s)', 2593 'blob': 'BYTEA', 2594 'upload': 'VARCHAR(%(length)s)', 2595 'integer': 'INTEGER', 2596 'bigint': 'BIGINT', 2597 'float': 'FLOAT', 2598 'double': 'FLOAT8', 2599 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', 2600 'date': 'DATE', 2601 'time': 'TIME', 2602 'datetime': 'TIMESTAMP', 2603 'id': 'SERIAL PRIMARY KEY', 2604 'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 2605 'list:integer': 'TEXT', 2606 'list:string': 'TEXT', 2607 'list:reference': 'TEXT', 2608 'geometry': 'GEOMETRY', 2609 'geography': 'GEOGRAPHY', 2610 'big-id': 'BIGSERIAL PRIMARY KEY', 2611 'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 2612 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 2613 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', 2614 2615 } 2616 2617 QUOTE_TEMPLATE = '%s' 2618
2619 - def varquote(self,name):
2620 return varquote_aux(name,'"%s"')
2621
2622 - def adapt(self,obj):
2623 if self.driver_name == 'psycopg2': 2624 return psycopg2_adapt(obj).getquoted() 2625 elif self.driver_name == 'pg8000': 2626 return "'%s'" % str(obj).replace("%","%%").replace("'","''") 2627 else: 2628 return "'%s'" % str(obj).replace("'","''")
2629
2630 - def sequence_name(self,table):
2631 return '%s_id_Seq' % table
2632
2633 - def RANDOM(self):
2634 return 'RANDOM()'
2635
2636 - def ADD(self, first, second):
2637 t = first.type 2638 if t in ('text','string','password', 'json', 'upload','blob'): 2639 return '(%s || %s)' % (self.expand(first), self.expand(second, t)) 2640 else: 2641 return '(%s + %s)' % (self.expand(first), self.expand(second, t))
2642
2643 - def distributed_transaction_begin(self,key):
2644 return
2645
2646 - def prepare(self,key):
2647 self.execute("PREPARE TRANSACTION '%s';" % key)
2648
2649 - def commit_prepared(self,key):
2650 self.execute("COMMIT PREPARED '%s';" % key)
2651
2652 - def rollback_prepared(self,key):
2653 self.execute("ROLLBACK PREPARED '%s';" % key)
2654
2655 - def create_sequence_and_triggers(self, query, table, **args):
2656 # following lines should only be executed if table._sequence_name does not exist 2657 # self.execute('CREATE SEQUENCE %s;' % table._sequence_name) 2658 # self.execute("ALTER TABLE %s ALTER COLUMN %s SET DEFAULT NEXTVAL('%s');" \ 2659 # % (table._tablename, table._fieldname, table._sequence_name)) 2660 self.execute(query)
2661 2662 REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:@]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?sslmode=(?P<sslmode>.+))?$') 2663
2664 - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', 2665 credential_decoder=IDENTITY, driver_args={}, 2666 adapter_args={}, do_connect=True, srid=4326, 2667 after_connection=None):
2668 self.db = db 2669 self.dbengine = "postgres" 2670 self.uri = uri 2671 if do_connect: self.find_driver(adapter_args,uri) 2672 self.pool_size = pool_size 2673 self.folder = folder 2674 self.db_codec = db_codec 2675 self._after_connection = after_connection 2676 self.srid = srid 2677 self.find_or_make_work_folder() 2678 ruri = uri.split('://',1)[1] 2679 m = self.REGEX_URI.match(ruri) 2680 if not m: 2681 raise SyntaxError("Invalid URI string in DAL") 2682 user = credential_decoder(m.group('user')) 2683 if not user: 2684 raise SyntaxError('User required') 2685 password = credential_decoder(m.group('password')) 2686 if not password: 2687 password = '' 2688 host = m.group('host') 2689 if not host: 2690 raise SyntaxError('Host name required') 2691 db = m.group('db') 2692 if not db: 2693 raise SyntaxError('Database name required') 2694 port = m.group('port') or '5432' 2695 sslmode = m.group('sslmode') 2696 if sslmode: 2697 msg = ("dbname='%s' user='%s' host='%s' " 2698 "port=%s password='%s' sslmode='%s'") \ 2699 % (db, user, host, port, password, sslmode) 2700 else: 2701 msg = ("dbname='%s' user='%s' host='%s' " 2702 "port=%s password='%s'") \ 2703 % (db, user, host, port, password) 2704 # choose driver according to uri 2705 if self.driver: 2706 self.__version__ = "%s %s" % (self.driver.__name__, 2707 self.driver.__version__) 2708 else: 2709 self.__version__ = None 2710 def connector(msg=msg,driver_args=driver_args): 2711 return self.driver.connect(msg,**driver_args)
2712 self.connector = connector 2713 if do_connect: self.reconnect()
2714
2715 - def after_connection(self):
2716 self.connection.set_client_encoding('UTF8') 2717 self.execute("SET standard_conforming_strings=on;") 2718 self.try_json()
2719
2720 - def lastrowid(self,table):
2721 self.execute("select currval('%s')" % table._sequence_name) 2722 return int(self.cursor.fetchone()[0])
2723
2724 - def try_json(self):
2725 # check JSON data type support 2726 # (to be added to after_connection) 2727 if self.driver_name == "pg8000": 2728 supports_json = self.connection.server_version >= "9.2.0" 2729 elif (self.driver_name == "psycopg2") and \ 2730 (self.driver.__version__ >= "2.0.12"): 2731 supports_json = self.connection.server_version >= 90200 2732 elif self.driver_name == "zxJDBC": 2733 supports_json = self.connection.dbversion >= "9.2.0" 2734 else: supports_json = None 2735 if supports_json: 2736 self.types["json"] = "JSON" 2737 self.native_json = True 2738 else: LOGGER.debug("Your database version does not support the JSON data type (using TEXT instead)")
2739
2740 - def LIKE(self,first,second):
2741 args = (self.expand(first), self.expand(second,'string')) 2742 if not first.type in ('string', 'text', 'json'): 2743 return '(%s LIKE %s)' % ( 2744 self.CAST(args[0], 'CHAR(%s)' % first.length), args[1]) 2745 else: 2746 return '(%s LIKE %s)' % args
2747
2748 - def ILIKE(self,first,second):
2749 args = (self.expand(first), self.expand(second,'string')) 2750 if not first.type in ('string', 'text', 'json'): 2751 return '(%s LIKE %s)' % ( 2752 self.CAST(args[0], 'CHAR(%s)' % first.length), args[1]) 2753 else: 2754 return '(%s ILIKE %s)' % args
2755
2756 - def REGEXP(self,first,second):
2757 return '(%s ~ %s)' % (self.expand(first), 2758 self.expand(second,'string'))
2759
2760 - def STARTSWITH(self,first,second):
2761 return '(%s ILIKE %s)' % (self.expand(first), 2762 self.expand(second+'%','string'))
2763
2764 - def ENDSWITH(self,first,second):
2765 return '(%s ILIKE %s)' % (self.expand(first), 2766 self.expand('%'+second,'string'))
2767 2768 # GIS functions 2769
2770 - def ST_ASGEOJSON(self, first, second):
2771 """ 2772 http://postgis.org/docs/ST_AsGeoJSON.html 2773 """ 2774 return 'ST_AsGeoJSON(%s,%s,%s,%s)' %(second['version'], 2775 self.expand(first), second['precision'], second['options'])
2776
2777 - def ST_ASTEXT(self, first):
2778 """ 2779 http://postgis.org/docs/ST_AsText.html 2780 """ 2781 return 'ST_AsText(%s)' %(self.expand(first))
2782
2783 - def ST_X(self, first):
2784 """ 2785 http://postgis.org/docs/ST_X.html 2786 """ 2787 return 'ST_X(%s)' %(self.expand(first))
2788
2789 - def ST_Y(self, first):
2790 """ 2791 http://postgis.org/docs/ST_Y.html 2792 """ 2793 return 'ST_Y(%s)' %(self.expand(first))
2794
2795 - def ST_CONTAINS(self, first, second):
2796 """ 2797 http://postgis.org/docs/ST_Contains.html 2798 """ 2799 return 'ST_Contains(%s,%s)' %(self.expand(first), self.expand(second, first.type))
2800
2801 - def ST_DISTANCE(self, first, second):
2802 """ 2803 http://postgis.org/docs/ST_Distance.html 2804 """ 2805 return 'ST_Distance(%s,%s)' %(self.expand(first), self.expand(second, first.type))
2806
2807 - def ST_EQUALS(self, first, second):
2808 """ 2809 http://postgis.org/docs/ST_Equals.html 2810 """ 2811 return 'ST_Equals(%s,%s)' %(self.expand(first), self.expand(second, first.type))
2812
2813 - def ST_INTERSECTS(self, first, second):
2814 """ 2815 http://postgis.org/docs/ST_Intersects.html 2816 """ 2817 return 'ST_Intersects(%s,%s)' %(self.expand(first), self.expand(second, first.type))
2818
2819 - def ST_OVERLAPS(self, first, second):
2820 """ 2821 http://postgis.org/docs/ST_Overlaps.html 2822 """ 2823 return 'ST_Overlaps(%s,%s)' %(self.expand(first), self.expand(second, first.type))
2824
2825 - def ST_SIMPLIFY(self, first, second):
2826 """ 2827 http://postgis.org/docs/ST_Simplify.html 2828 """ 2829 return 'ST_Simplify(%s,%s)' %(self.expand(first), self.expand(second, 'double'))
2830
2831 - def ST_TOUCHES(self, first, second):
2832 """ 2833 http://postgis.org/docs/ST_Touches.html 2834 """ 2835 return 'ST_Touches(%s,%s)' %(self.expand(first), self.expand(second, first.type))
2836
2837 - def ST_WITHIN(self, first, second):
2838 """ 2839 http://postgis.org/docs/ST_Within.html 2840 """ 2841 return 'ST_Within(%s,%s)' %(self.expand(first), self.expand(second, first.type))
2842
2843 - def represent(self, obj, fieldtype):
2844 field_is_type = fieldtype.startswith 2845 if field_is_type('geo'): 2846 srid = 4326 # postGIS default srid for geometry 2847 geotype, parms = fieldtype[:-1].split('(') 2848 parms = parms.split(',') 2849 if len(parms) >= 2: 2850 schema, srid = parms[:2] 2851 if field_is_type('geometry'): 2852 value = "ST_GeomFromText('%s',%s)" %(obj, srid) 2853 elif field_is_type('geography'): 2854 value = "ST_GeogFromText('SRID=%s;%s')" %(srid, obj) 2855 # else: 2856 # raise SyntaxError('Invalid field type %s' %fieldtype) 2857 return value 2858 return BaseAdapter.represent(self, obj, fieldtype)
2859
2860 -class NewPostgreSQLAdapter(PostgreSQLAdapter):
2861 drivers = ('psycopg2','pg8000') 2862 2863 types = { 2864 'boolean': 'CHAR(1)', 2865 'string': 'VARCHAR(%(length)s)', 2866 'text': 'TEXT', 2867 'json': 'TEXT', 2868 'password': 'VARCHAR(%(length)s)', 2869 'blob': 'BYTEA', 2870 'upload': 'VARCHAR(%(length)s)', 2871 'integer': 'INTEGER', 2872 'bigint': 'BIGINT', 2873 'float': 'FLOAT', 2874 'double': 'FLOAT8', 2875 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', 2876 'date': 'DATE', 2877 'time': 'TIME', 2878 'datetime': 'TIMESTAMP', 2879 'id': 'SERIAL PRIMARY KEY', 2880 'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 2881 'list:integer': 'BIGINT[]', 2882 'list:string': 'TEXT[]', 2883 'list:reference': 'BIGINT[]', 2884 'geometry': 'GEOMETRY', 2885 'geography': 'GEOGRAPHY', 2886 'big-id': 'BIGSERIAL PRIMARY KEY', 2887 'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 2888 } 2889
2890 - def parse_list_integers(self, value, field_type):
2891 return value
2892
2893 - def parse_list_references(self, value, field_type):
2894 return [self.parse_reference(r, field_type[5:]) for r in value]
2895
2896 - def parse_list_strings(self, value, field_type):
2897 return value
2898
2899 - def represent(self, obj, fieldtype):
2900 field_is_type = fieldtype.startswith 2901 if field_is_type('list:'): 2902 if not obj: 2903 obj = [] 2904 elif not isinstance(obj, (list, tuple)): 2905 obj = [obj] 2906 if field_is_type('list:string'): 2907 obj = map(str,obj) 2908 else: 2909 obj = map(int,obj) 2910 return 'ARRAY[%s]' % ','.join(repr(item) for item in obj) 2911 return BaseAdapter.represent(self, obj, fieldtype)
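With native arrays there is no bar-encoding; list values are rendered as ARRAY literals instead. Doctest-style, assuming adapter is a connected NewPostgreSQLAdapter:

    >>> adapter.represent([1, 2, 3], 'list:integer')
    'ARRAY[1,2,3]'
    >>> adapter.represent(['a', 'b'], 'list:string')
    "ARRAY['a','b']"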
2912
2913 2914 -class JDBCPostgreSQLAdapter(PostgreSQLAdapter):
2915 drivers = ('zxJDBC',) 2916 2917 REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+)$') 2918
2919 - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', 2920 credential_decoder=IDENTITY, driver_args={}, 2921 adapter_args={}, do_connect=True, after_connection=None ):
2922 self.db = db 2923 self.dbengine = "postgres" 2924 self.uri = uri 2925 if do_connect: self.find_driver(adapter_args,uri) 2926 self.pool_size = pool_size 2927 self.folder = folder 2928 self.db_codec = db_codec 2929 self._after_connection = after_connection 2930 self.find_or_make_work_folder() 2931 ruri = uri.split('://',1)[1] 2932 m = self.REGEX_URI.match(ruri) 2933 if not m: 2934 raise SyntaxError("Invalid URI string in DAL") 2935 user = credential_decoder(m.group('user')) 2936 if not user: 2937 raise SyntaxError('User required') 2938 password = credential_decoder(m.group('password')) 2939 if not password: 2940 password = '' 2941 host = m.group('host') 2942 if not host: 2943 raise SyntaxError('Host name required') 2944 db = m.group('db') 2945 if not db: 2946 raise SyntaxError('Database name required') 2947 port = m.group('port') or '5432' 2948 msg = ('jdbc:postgresql://%s:%s/%s' % (host, port, db), user, password) 2949 def connector(msg=msg,driver_args=driver_args): 2950 return self.driver.connect(*msg,**driver_args)
2951 self.connector = connector 2952 if do_connect: self.reconnect()
2953
2954 - def after_connection(self):
2955 self.connection.set_client_encoding('UTF8') 2956 self.execute('BEGIN;') 2957 self.execute("SET CLIENT_ENCODING TO 'UNICODE';") 2958 self.try_json()
2959
2960 2961 -class OracleAdapter(BaseAdapter):
2962 drivers = ('cx_Oracle',) 2963 2964 commit_on_alter_table = False 2965 types = { 2966 'boolean': 'CHAR(1)', 2967 'string': 'VARCHAR2(%(length)s)', 2968 'text': 'CLOB', 2969 'json': 'CLOB', 2970 'password': 'VARCHAR2(%(length)s)', 2971 'blob': 'CLOB', 2972 'upload': 'VARCHAR2(%(length)s)', 2973 'integer': 'INT', 2974 'bigint': 'NUMBER', 2975 'float': 'FLOAT', 2976 'double': 'BINARY_DOUBLE', 2977 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', 2978 'date': 'DATE', 2979 'time': 'CHAR(8)', 2980 'datetime': 'DATE', 2981 'id': 'NUMBER PRIMARY KEY', 2982 'reference': 'NUMBER, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 2983 'list:integer': 'CLOB', 2984 'list:string': 'CLOB', 2985 'list:reference': 'CLOB', 2986 'big-id': 'NUMBER PRIMARY KEY', 2987 'big-reference': 'NUMBER, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 2988 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 2989 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', 2990 } 2991
2992 - def sequence_name(self,tablename):
2993 return '%s_sequence' % tablename
2994
2995 - def trigger_name(self,tablename):
2996 return '%s_trigger' % tablename
2997
2998 - def LEFT_JOIN(self):
2999 return 'LEFT OUTER JOIN'
3000
3001 - def RANDOM(self):
3002 return 'dbms_random.value'
3003
3004 - def NOT_NULL(self,default,field_type):
3005 return 'DEFAULT %s NOT NULL' % self.represent(default,field_type)
3006
3007 - def _drop(self,table,mode):
3008 sequence_name = table._sequence_name 3009 return ['DROP TABLE %s %s;' % (table, mode), 'DROP SEQUENCE %s;' % sequence_name]
3010
3011 - def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
3012 if limitby: 3013 (lmin, lmax) = limitby 3014 if len(sql_w) > 1: 3015 sql_w_row = sql_w + ' AND w_row > %i' % lmin 3016 else: 3017 sql_w_row = 'WHERE w_row > %i' % lmin 3018 return 'SELECT %s %s FROM (SELECT w_tmp.*, ROWNUM w_row FROM (SELECT %s FROM %s%s%s) w_tmp WHERE ROWNUM<=%i) %s %s %s;' % (sql_s, sql_f, sql_f, sql_t, sql_w, sql_o, lmax, sql_t, sql_w_row, sql_o) 3019 return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
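Lacking LIMIT/OFFSET, the emulation above wraps the query twice: an inner ROWNUM cap for the upper bound and an outer w_row filter for the lower bound. For limitby=(10, 30) the generated statement has the shape:

    SELECT <fields> FROM (
        SELECT w_tmp.*, ROWNUM w_row FROM (
            SELECT <fields> FROM <tables> <where> <order>
        ) w_tmp WHERE ROWNUM <= 30
    ) <tables> WHERE w_row > 10 <order>;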
3020
3021 - def constraint_name(self, tablename, fieldname):
3022 constraint_name = BaseAdapter.constraint_name(self, tablename, fieldname) 3023 if len(constraint_name)>30: 3024 constraint_name = '%s_%s__constraint' % (tablename[:10], fieldname[:7]) 3025 return constraint_name
3026
3027 - def represent_exceptions(self, obj, fieldtype):
3028 if fieldtype == 'blob': 3029 obj = base64.b64encode(str(obj)) 3030 return ":CLOB('%s')" % obj 3031 elif fieldtype == 'date': 3032 if isinstance(obj, (datetime.date, datetime.datetime)): 3033 obj = obj.isoformat()[:10] 3034 else: 3035 obj = str(obj) 3036 return "to_date('%s','yyyy-mm-dd')" % obj 3037 elif fieldtype == 'datetime': 3038 if isinstance(obj, datetime.datetime): 3039 obj = obj.isoformat()[:19].replace('T',' ') 3040 elif isinstance(obj, datetime.date): 3041 obj = obj.isoformat()[:10]+' 00:00:00' 3042 else: 3043 obj = str(obj) 3044 return "to_date('%s','yyyy-mm-dd hh24:mi:ss')" % obj 3045 return None
3046
3047 - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', 3048 credential_decoder=IDENTITY, driver_args={}, 3049 adapter_args={}, do_connect=True, after_connection=None):
3050 self.db = db 3051 self.dbengine = "oracle" 3052 self.uri = uri 3053 if do_connect: self.find_driver(adapter_args,uri) 3054 self.pool_size = pool_size 3055 self.folder = folder 3056 self.db_codec = db_codec 3057 self._after_connection = after_connection 3058 self.find_or_make_work_folder() 3059 ruri = uri.split('://',1)[1] 3060 if not 'threaded' in driver_args: 3061 driver_args['threaded']=True 3062 def connector(uri=ruri,driver_args=driver_args): 3063 return self.driver.connect(uri,**driver_args)
3064 self.connector = connector 3065 if do_connect: self.reconnect()
3066
3067 - def after_connection(self):
3068 self.execute("ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD HH24:MI:SS';") 3069 self.execute("ALTER SESSION SET NLS_TIMESTAMP_FORMAT = 'YYYY-MM-DD HH24:MI:SS';")
3070 3071 oracle_fix = re.compile("[^']*('[^']*'[^']*)*\:(?P<clob>CLOB\('([^']+|'')*'\))") 3072
3073 - def execute(self, command, args=None):
3074 args = args or [] 3075 i = 1 3076 while True: 3077 m = self.oracle_fix.match(command) 3078 if not m: 3079 break 3080 command = command[:m.start('clob')] + str(i) + command[m.end('clob'):] 3081 args.append(m.group('clob')[6:-2].replace("''", "'")) 3082 i += 1 3083 if command[-1:]==';': 3084 command = command[:-1] 3085 return self.log_execute(command, args)
3086
3087 - def create_sequence_and_triggers(self, query, table, **args):
3088 tablename = table._tablename 3089 id_name = table._id.name 3090 sequence_name = table._sequence_name 3091 trigger_name = table._trigger_name 3092 self.execute(query) 3093 self.execute('CREATE SEQUENCE %s START WITH 1 INCREMENT BY 1 NOMAXVALUE MINVALUE -1;' % sequence_name) 3094 self.execute(""" 3095 CREATE OR REPLACE TRIGGER %(trigger_name)s BEFORE INSERT ON %(tablename)s FOR EACH ROW 3096 DECLARE 3097 curr_val NUMBER; 3098 diff_val NUMBER; 3099 PRAGMA autonomous_transaction; 3100 BEGIN 3101 IF :NEW.%(id)s IS NOT NULL THEN 3102 EXECUTE IMMEDIATE 'SELECT %(sequence_name)s.nextval FROM dual' INTO curr_val; 3103 diff_val := :NEW.%(id)s - curr_val - 1; 3104 IF diff_val != 0 THEN 3105 EXECUTE IMMEDIATE 'alter sequence %(sequence_name)s increment by '|| diff_val; 3106 EXECUTE IMMEDIATE 'SELECT %(sequence_name)s.nextval FROM dual' INTO curr_val; 3107 EXECUTE IMMEDIATE 'alter sequence %(sequence_name)s increment by 1'; 3108 END IF; 3109 END IF; 3110 SELECT %(sequence_name)s.nextval INTO :NEW.%(id)s FROM DUAL; 3111 END; 3112 """ % dict(trigger_name=trigger_name, tablename=tablename, 3113 sequence_name=sequence_name,id=id_name))
3114
3115 - def lastrowid(self,table):
3116 sequence_name = table._sequence_name 3117 self.execute('SELECT %s.currval FROM dual;' % sequence_name) 3118 return long(self.cursor.fetchone()[0])
3119 3120 #def parse_value(self, value, field_type, blob_decode=True): 3121 # if blob_decode and isinstance(value, cx_Oracle.LOB): 3122 # try: 3123 # value = value.read() 3124 # except self.driver.ProgrammingError: 3125 # # After a subsequent fetch the LOB value is not valid anymore 3126 # pass 3127 # return BaseAdapter.parse_value(self, value, field_type, blob_decode) 3128
3129 - def _fetchall(self):
3130 if any(x[1]==cx_Oracle.CLOB for x in self.cursor.description): 3131 return [tuple([(c.read() if type(c) == cx_Oracle.LOB else c) \ 3132 for c in r]) for r in self.cursor] 3133 else: 3134 return self.cursor.fetchall()
3135
3136 -class MSSQLAdapter(BaseAdapter):
3137 drivers = ('pyodbc',) 3138 T_SEP = 'T' 3139 3140 QUOTE_TEMPLATE = "[%s]" 3141 3142 types = { 3143 'boolean': 'BIT', 3144 'string': 'VARCHAR(%(length)s)', 3145 'text': 'TEXT', 3146 'json': 'TEXT', 3147 'password': 'VARCHAR(%(length)s)', 3148 'blob': 'IMAGE', 3149 'upload': 'VARCHAR(%(length)s)', 3150 'integer': 'INT', 3151 'bigint': 'BIGINT', 3152 'float': 'FLOAT', 3153 'double': 'FLOAT', 3154 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', 3155 'date': 'DATETIME', 3156 'time': 'CHAR(8)', 3157 'datetime': 'DATETIME', 3158 'id': 'INT IDENTITY PRIMARY KEY', 3159 'reference': 'INT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 3160 'list:integer': 'TEXT', 3161 'list:string': 'TEXT', 3162 'list:reference': 'TEXT', 3163 'geometry': 'geometry', 3164 'geography': 'geography', 3165 'big-id': 'BIGINT IDENTITY PRIMARY KEY', 3166 'big-reference': 'BIGINT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 3167 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 3168 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', 3169 } 3170
3171 - def concat_add(self,tablename):
3172 return '; ALTER TABLE %s ADD ' % tablename
3173
3174 - def varquote(self,name):
3175 return varquote_aux(name,'[%s]')
3176
3177 - def EXTRACT(self,field,what):
3178 return "DATEPART(%s,%s)" % (what, self.expand(field))
3179
3180 - def LEFT_JOIN(self):
3181 return 'LEFT OUTER JOIN'
3182
3183 - def RANDOM(self):
3184 return 'NEWID()'
3185
3186 - def ALLOW_NULL(self):
3187 return ' NULL'
3188
3189 - def CAST(self, first, second):
3190 return first # apparently no cast necessary in MSSQL
3191
3192 - def SUBSTRING(self,field,parameters):
3193 return 'SUBSTRING(%s,%s,%s)' % (self.expand(field), parameters[0], parameters[1])
3194
3195 - def PRIMARY_KEY(self,key):
3196 return 'PRIMARY KEY CLUSTERED (%s)' % key
3197
3198 - def AGGREGATE(self, first, what):
3199 if what == 'LENGTH': 3200 what = 'LEN' 3201 return "%s(%s)" % (what, self.expand(first))
3202 3203
3204 - def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
3205 if limitby: 3206 (lmin, lmax) = limitby 3207 sql_s += ' TOP %i' % lmax 3208 return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
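Plain TOP can only trim the tail of the result set, so the offset half of limitby is applied client-side: _select_aux (above) always calls rowslice, and MSSQLAdapter.rowslice (below) discards the first lmin rows in Python. A sketch for limitby=(10, 30), with adapter and fetched_rows as illustrative names:

    sql = adapter.select_limitby('', 'person.name', 'person', '', '', (10, 30))
    # -> 'SELECT  TOP 30 person.name FROM person;'
    rows = adapter.rowslice(fetched_rows, 10, None)   # drop rows 0..9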
3209 3210 TRUE = 1 3211 FALSE = 0 3212 3213 REGEX_DSN = re.compile('^(?P<dsn>.+)$') 3214 REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?(?P<urlargs>.*))?$') 3215 REGEX_ARGPATTERN = re.compile('(?P<argkey>[^=]+)=(?P<argvalue>[^&]*)') 3216
3217 - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', 3218 credential_decoder=IDENTITY, driver_args={}, 3219 adapter_args={}, do_connect=True, srid=4326, 3220 after_connection=None):
3221 self.db = db 3222 self.dbengine = "mssql" 3223 self.uri = uri 3224 if do_connect: self.find_driver(adapter_args,uri) 3225 self.pool_size = pool_size 3226 self.folder = folder 3227 self.db_codec = db_codec 3228 self._after_connection = after_connection 3229 self.srid = srid 3230 self.find_or_make_work_folder() 3231 # ## read: http://bytes.com/groups/python/460325-cx_oracle-utf8 3232 ruri = uri.split('://',1)[1] 3233 if '@' not in ruri: 3234 try: 3235 m = self.REGEX_DSN.match(ruri) 3236 if not m: 3237 raise SyntaxError( 3238 'Parsing uri string(%s) has no result' % self.uri) 3239 dsn = m.group('dsn') 3240 if not dsn: 3241 raise SyntaxError('DSN required') 3242 except SyntaxError: 3243 e = sys.exc_info()[1] 3244 LOGGER.error('NdGpatch error') 3245 raise e 3246 # was cnxn = 'DSN=%s' % dsn 3247 cnxn = dsn 3248 else: 3249 m = self.REGEX_URI.match(ruri) 3250 if not m: 3251 raise SyntaxError( 3252 "Invalid URI string in DAL: %s" % self.uri) 3253 user = credential_decoder(m.group('user')) 3254 if not user: 3255 raise SyntaxError('User required') 3256 password = credential_decoder(m.group('password')) 3257 if not password: 3258 password = '' 3259 host = m.group('host') 3260 if not host: 3261 raise SyntaxError('Host name required') 3262 db = m.group('db') 3263 if not db: 3264 raise SyntaxError('Database name required') 3265 port = m.group('port') or '1433' 3266 # Parse the optional url name-value arg pairs after the '?' 3267 # (in the form of arg1=value1&arg2=value2&...) 3268 # Default values (drivers like FreeTDS insist on uppercase parameter keys) 3269 argsdict = { 'DRIVER':'{SQL Server}' } 3270 urlargs = m.group('urlargs') or '' 3271 for argmatch in self.REGEX_ARGPATTERN.finditer(urlargs): 3272 argsdict[str(argmatch.group('argkey')).upper()] = argmatch.group('argvalue') 3273 urlargs = ';'.join(['%s=%s' % (ak, av) for (ak, av) in argsdict.iteritems()]) 3274 cnxn = 'SERVER=%s;PORT=%s;DATABASE=%s;UID=%s;PWD=%s;%s' \ 3275 % (host, port, db, user, password, urlargs) 3276 def connector(cnxn=cnxn,driver_args=driver_args): 3277 return self.driver.connect(cnxn,**driver_args)
3278 self.connector = connector 3279 if do_connect: self.reconnect()
3280
3281 - def lastrowid(self,table):
3282 #self.execute('SELECT @@IDENTITY;') 3283 self.execute('SELECT SCOPE_IDENTITY();') 3284 return long(self.cursor.fetchone()[0])
3285
3286 - def rowslice(self,rows,minimum=0,maximum=None):
3287 if maximum is None: 3288 return rows[minimum:] 3289 return rows[minimum:maximum]
3290
3291 - def EPOCH(self, first):
3292 return "DATEDIFF(second, '1970-01-01 00:00:00', %s)" % self.expand(first)
3293
3294 - def CONCAT(self, *items):
3295 return '(%s)' % ' + '.join(self.expand(x,'string') for x in items)
3296 3297 # GIS Spatial Extensions 3298 3299 # No STAsGeoJSON in MSSQL 3300
3301 - def ST_ASTEXT(self, first):
3302 return '%s.STAsText()' %(self.expand(first))
3303
3304 - def ST_CONTAINS(self, first, second):
3305 return '%s.STContains(%s)=1' %(self.expand(first), self.expand(second, first.type))
3306
3307 - def ST_DISTANCE(self, first, second):
3308 return '%s.STDistance(%s)' %(self.expand(first), self.expand(second, first.type))
3309
3310 - def ST_EQUALS(self, first, second):
3311 return '%s.STEquals(%s)=1' %(self.expand(first), self.expand(second, first.type))
3312
3313 - def ST_INTERSECTS(self, first, second):
3314 return '%s.STIntersects(%s)=1' %(self.expand(first), self.expand(second, first.type))
3315
3316 - def ST_OVERLAPS(self, first, second):
3317 return '%s.STOverlaps(%s)=1' %(self.expand(first), self.expand(second, first.type))
3318 3319 # no STSimplify in MSSQL 3320
3321 - def ST_TOUCHES(self, first, second):
3322 return '%s.STTouches(%s)=1' %(self.expand(first), self.expand(second, first.type))
3323
3324 - def ST_WITHIN(self, first, second):
3325 return '%s.STWithin(%s)=1' %(self.expand(first), self.expand(second, first.type))
3326
3327 - def represent(self, obj, fieldtype):
3328 field_is_type = fieldtype.startswith 3329 if field_is_type('geometry'): 3330 srid = 0 # MS SQL default srid for geometry 3331 if '(' in fieldtype: 3332 geotype, parms = fieldtype[:-1].split('(') 3333 if parms: 3334 srid = parms 3335 return "geometry::STGeomFromText('%s',%s)" %(obj, srid) 3336 elif field_is_type('geography'): 3337 srid = 4326 # MS SQL default srid for geography 3338 if '(' in fieldtype: 3339 geotype, parms = fieldtype[:-1].split('(') 3340 if parms: 3341 srid = parms 3342 return "geography::STGeomFromText('%s',%s)" %(obj, srid) 3343 return BaseAdapter.represent(self, obj, fieldtype)
3345
3346 3347 -class MSSQL3Adapter(MSSQLAdapter):
3348 """ Experimental support for pagination in MSSQL: slices with a non-zero offset are emulated with a ROW_NUMBER() subquery on servers that predate the 2012 OFFSET/FETCH syntax """
3349 - def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
3350 if limitby: 3351 (lmin, lmax) = limitby 3352 if lmin == 0: 3353 sql_s += ' TOP %i' % lmax 3354 return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o) 3355 lmin += 1 3356 sql_o_inner = sql_o[sql_o.find('ORDER BY ')+9:] 3357 sql_g_inner = sql_o[:sql_o.find('ORDER BY ')] 3358 sql_f_outer = ['f_%s' % f for f in range(len(sql_f.split(',')))] 3359 sql_f_inner = [f for f in sql_f.split(',')] 3360 sql_f_iproxy = ['%s AS %s' % (o, n) for (o, n) in zip(sql_f_inner, sql_f_outer)] 3361 sql_f_iproxy = ', '.join(sql_f_iproxy) 3362 sql_f_oproxy = ', '.join(sql_f_outer) 3363 return 'SELECT %s %s FROM (SELECT %s ROW_NUMBER() OVER (ORDER BY %s) AS w_row, %s FROM %s%s%s) TMP WHERE w_row BETWEEN %i AND %s;' % (sql_s,sql_f_oproxy,sql_s,sql_f,sql_f_iproxy,sql_t,sql_w,sql_g_inner,lmin,lmax) 3364 return 'SELECT %s %s FROM %s%s%s;' % (sql_s,sql_f,sql_t,sql_w,sql_o)
3365 - def rowslice(self,rows,minimum=0,maximum=None):
3366 return rows
3367
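Since MSSQL before 2012 has no OFFSET clause, a slice with a non-zero lower bound is emulated by numbering rows in a subquery, while a slice starting at zero keeps the cheaper TOP form. As a hedged illustration with a hypothetical person table, select(person.name, orderby=person.name, limitby=(10, 20)) is rendered roughly as:

SELECT f_0 FROM
    (SELECT ROW_NUMBER() OVER (ORDER BY person.name) AS w_row,
            person.name AS f_0
     FROM person) TMP
WHERE w_row BETWEEN 11 AND 20;

and rowslice() returns the rows unchanged because the slicing already happened server-side.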
3368 -class MSSQL4Adapter(MSSQLAdapter):
3369 """ support for true pagination in MSSQL >= 2012""" 3370
3371 - def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
3372 if not sql_o: 3373 # OFFSET ... FETCH requires an ORDER BY clause; if the caller 3374 # did not provide one, fall back to a random (but valid) ordering 3375 sql_o += ' ORDER BY %s' % self.RANDOM() 3376 if limitby: 3377 (lmin, lmax) = limitby 3378 sql_o += ' OFFSET %i ROWS FETCH NEXT %i ROWS ONLY' % (lmin, lmax - lmin) 3379 return 'SELECT %s %s FROM %s%s%s;' % \ 3380 (sql_s, sql_f, sql_t, sql_w, sql_o)
3381
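On 2012 and later servers the native syntax is used instead: limitby=(10, 20) simply appends to the ORDER BY clause, roughly (person being a hypothetical table):

SELECT person.name FROM person ORDER BY person.name
    OFFSET 10 ROWS FETCH NEXT 10 ROWS ONLY;

so rowslice() below can again hand the rows back untouched.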
3382 - def rowslice(self,rows,minimum=0,maximum=None):
3383 return rows
3384
3385 -class MSSQL2Adapter(MSSQLAdapter):
3386 drivers = ('pyodbc',) 3387 3388 types = { 3389 'boolean': 'CHAR(1)', 3390 'string': 'NVARCHAR(%(length)s)', 3391 'text': 'NTEXT', 3392 'json': 'NTEXT', 3393 'password': 'NVARCHAR(%(length)s)', 3394 'blob': 'IMAGE', 3395 'upload': 'NVARCHAR(%(length)s)', 3396 'integer': 'INT', 3397 'bigint': 'BIGINT', 3398 'float': 'FLOAT', 3399 'double': 'FLOAT', 3400 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', 3401 'date': 'DATETIME', 3402 'time': 'CHAR(8)', 3403 'datetime': 'DATETIME', 3404 'id': 'INT IDENTITY PRIMARY KEY', 3405 'reference': 'INT, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 3406 'list:integer': 'NTEXT', 3407 'list:string': 'NTEXT', 3408 'list:reference': 'NTEXT', 3409 'big-id': 'BIGINT IDENTITY PRIMARY KEY', 3410 'big-reference': 'BIGINT, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 3411 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 3412 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', 3413 } 3414
3415 - def represent(self, obj, fieldtype):
3416 value = BaseAdapter.represent(self, obj, fieldtype) 3417 if fieldtype in ('string','text', 'json') and value[:1]=="'": 3418 value = 'N'+value 3419 return value
3420
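The N prefix marks the literal as NVARCHAR, so non-ASCII text survives the round trip to the server. A hedged sketch, where adapter stands for a connected MSSQL2Adapter instance:

>>> adapter.represent('hello', 'string')     # doctest: +SKIP
"N'hello'"
>>> adapter.represent(3, 'integer')          # non-text types unaffected  # doctest: +SKIP
'3'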
3421 - def execute(self,a):
3422 return self.log_execute(a.decode('utf8'))
3423
3424 -class VerticaAdapter(MSSQLAdapter):
3425 drivers = ('pyodbc',) 3426 T_SEP = ' ' 3427 3428 types = { 3429 'boolean': 'BOOLEAN', 3430 'string': 'VARCHAR(%(length)s)', 3431 'text': 'BYTEA', 3432 'json': 'VARCHAR(%(length)s)', 3433 'password': 'VARCHAR(%(length)s)', 3434 'blob': 'BYTEA', 3435 'upload': 'VARCHAR(%(length)s)', 3436 'integer': 'INT', 3437 'bigint': 'BIGINT', 3438 'float': 'FLOAT', 3439 'double': 'DOUBLE PRECISION', 3440 'decimal': 'DECIMAL(%(precision)s,%(scale)s)', 3441 'date': 'DATE', 3442 'time': 'TIME', 3443 'datetime': 'DATETIME', 3444 'id': 'IDENTITY', 3445 'reference': 'INT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 3446 'list:integer': 'BYTEA', 3447 'list:string': 'BYTEA', 3448 'list:reference': 'BYTEA', 3449 'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 3450 } 3451 3452
3453 - def EXTRACT(self, first, what):
3454 return "DATE_PART('%s', TIMESTAMP %s)" % (what, self.expand(first))
3455
3456 - def _truncate(self, table, mode=''):
3457 tablename = table._tablename 3458 return ['TRUNCATE %s %s;' % (tablename, mode or '')]
3459
3460 - def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
3461 if limitby: 3462 (lmin, lmax) = limitby 3463 sql_o += ' LIMIT %i OFFSET %i' % (lmax - lmin, lmin) 3464 return 'SELECT %s %s FROM %s%s%s;' % \ 3465 (sql_s, sql_f, sql_t, sql_w, sql_o)
3466
3467 - def lastrowid(self,table):
3468 self.execute('SELECT LAST_INSERT_ID();') 3469 return long(self.cursor.fetchone()[0])
3470
3471 - def execute(self, a):
3472 return self.log_execute(a)
3473
3474 -class SybaseAdapter(MSSQLAdapter):
3475 drivers = ('Sybase',) 3476 3477 types = { 3478 'boolean': 'BIT', 3479 'string': 'CHAR VARYING(%(length)s)', 3480 'text': 'TEXT', 3481 'json': 'TEXT', 3482 'password': 'CHAR VARYING(%(length)s)', 3483 'blob': 'IMAGE', 3484 'upload': 'CHAR VARYING(%(length)s)', 3485 'integer': 'INT', 3486 'bigint': 'BIGINT', 3487 'float': 'FLOAT', 3488 'double': 'FLOAT', 3489 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', 3490 'date': 'DATETIME', 3491 'time': 'CHAR(8)', 3492 'datetime': 'DATETIME', 3493 'id': 'INT IDENTITY PRIMARY KEY', 3494 'reference': 'INT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 3495 'list:integer': 'TEXT', 3496 'list:string': 'TEXT', 3497 'list:reference': 'TEXT', 3498 'geometry': 'geometry', 3499 'geography': 'geography', 3500 'big-id': 'BIGINT IDENTITY PRIMARY KEY', 3501 'big-reference': 'BIGINT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 3502 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 3503 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', 3504 } 3505 3506
3507 - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', 3508 credential_decoder=IDENTITY, driver_args={}, 3509 adapter_args={}, do_connect=True, srid=4326, 3510 after_connection=None):
3511 self.db = db 3512 self.dbengine = "sybase" 3513 self.uri = uri 3514 if do_connect: self.find_driver(adapter_args,uri) 3515 self.pool_size = pool_size 3516 self.folder = folder 3517 self.db_codec = db_codec 3518 self._after_connection = after_connection 3519 self.srid = srid 3520 self.find_or_make_work_folder() 3521 # ## read: http://bytes.com/groups/python/460325-cx_oracle-utf8 3522 ruri = uri.split('://',1)[1] 3523 if '@' not in ruri: 3524 try: 3525 m = self.REGEX_DSN.match(ruri) 3526 if not m: 3527 raise SyntaxError( 3528 'Parsing uri string(%s) has no result' % self.uri) 3529 dsn = m.group('dsn') 3530 if not dsn: 3531 raise SyntaxError('DSN required') 3532 except SyntaxError: 3533 e = sys.exc_info()[1] 3534 LOGGER.error('NdGpatch error') 3535 raise e 3536 else: 3537 m = self.REGEX_URI.match(ruri) 3538 if not m: 3539 raise SyntaxError( 3540 "Invalid URI string in DAL: %s" % self.uri) 3541 user = credential_decoder(m.group('user')) 3542 if not user: 3543 raise SyntaxError('User required') 3544 password = credential_decoder(m.group('password')) 3545 if not password: 3546 password = '' 3547 host = m.group('host') 3548 if not host: 3549 raise SyntaxError('Host name required') 3550 db = m.group('db') 3551 if not db: 3552 raise SyntaxError('Database name required') 3553 port = m.group('port') or '1433' 3554 3555 dsn = 'sybase:host=%s:%s;dbname=%s' % (host,port,db) 3556 3557 driver_args.update(user = credential_decoder(user), 3558 password = credential_decoder(password)) 3559 3560 def connector(dsn=dsn,driver_args=driver_args): 3561 return self.driver.connect(dsn,**driver_args)
3562 self.connector = connector 3563 if do_connect: self.reconnect()
3564
3565 3566 -class FireBirdAdapter(BaseAdapter):
3567 drivers = ('kinterbasdb','firebirdsql','fdb','pyodbc') 3568 3569 commit_on_alter_table = False 3570 support_distributed_transaction = True 3571 types = { 3572 'boolean': 'CHAR(1)', 3573 'string': 'VARCHAR(%(length)s)', 3574 'text': 'BLOB SUB_TYPE 1', 3575 'json': 'BLOB SUB_TYPE 1', 3576 'password': 'VARCHAR(%(length)s)', 3577 'blob': 'BLOB SUB_TYPE 0', 3578 'upload': 'VARCHAR(%(length)s)', 3579 'integer': 'INTEGER', 3580 'bigint': 'BIGINT', 3581 'float': 'FLOAT', 3582 'double': 'DOUBLE PRECISION', 3583 'decimal': 'DECIMAL(%(precision)s,%(scale)s)', 3584 'date': 'DATE', 3585 'time': 'TIME', 3586 'datetime': 'TIMESTAMP', 3587 'id': 'INTEGER PRIMARY KEY', 3588 'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 3589 'list:integer': 'BLOB SUB_TYPE 1', 3590 'list:string': 'BLOB SUB_TYPE 1', 3591 'list:reference': 'BLOB SUB_TYPE 1', 3592 'big-id': 'BIGINT PRIMARY KEY', 3593 'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 3594 } 3595
3596 - def sequence_name(self,tablename):
3597 return 'genid_%s' % tablename
3598
3599 - def trigger_name(self,tablename):
3600 return 'trg_id_%s' % tablename
3601
3602 - def RANDOM(self):
3603 return 'RAND()'
3604
3605 - def EPOCH(self, first):
3606 return "DATEDIFF(second, '1970-01-01 00:00:00', %s)" % self.expand(first)
3607
3608 - def NOT_NULL(self,default,field_type):
3609 return 'DEFAULT %s NOT NULL' % self.represent(default,field_type)
3610
3611 - def SUBSTRING(self,field,parameters):
3612 return 'SUBSTRING(%s from %s for %s)' % (self.expand(field), parameters[0], parameters[1])
3613
3614 - def LENGTH(self, first):
3615 return "CHAR_LENGTH(%s)" % self.expand(first)
3616
3617 - def CONTAINS(self,first,second,case_sensitive=False):
3618 if first.type.startswith('list:'): 3619 second = Expression(None,self.CONCAT('|',Expression( 3620 None,self.REPLACE(second,('|','||'))),'|')) 3621 return '(%s CONTAINING %s)' % (self.expand(first), 3622 self.expand(second, 'string'))
3623
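Firebird stores list: fields as '|'-delimited strings, so containment is rewritten as a CONTAINING test against the encoded token, with any '|' inside the needle escaped to '||'. For a hypothetical list:string field thing.tags, the generated WHERE fragment looks roughly like (parenthesization simplified):

(thing.tags CONTAINING ('|' || REPLACE('red','|','||') || '|'))

For plain string fields the needle is passed to CONTAINING unmodified.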
3624 - def _drop(self,table,mode):
3625 sequence_name = table._sequence_name 3626 return ['DROP TABLE %s %s;' % (table, mode), 'DROP GENERATOR %s;' % sequence_name]
3627
3628 - def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
3629 if limitby: 3630 (lmin, lmax) = limitby 3631 sql_s = ' FIRST %i SKIP %i %s' % (lmax - lmin, lmin, sql_s) 3632 return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
3633
3634 - def _truncate(self,table,mode = ''):
3635 return ['DELETE FROM %s;' % table._tablename, 3636 'SET GENERATOR %s TO 0;' % table._sequence_name]
3637 3638 REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+?)(\?set_encoding=(?P<charset>\w+))?$') 3639
3640 - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', 3641 credential_decoder=IDENTITY, driver_args={}, 3642 adapter_args={}, do_connect=True, after_connection=None):
3643 self.db = db 3644 self.dbengine = "firebird" 3645 self.uri = uri 3646 if do_connect: self.find_driver(adapter_args,uri) 3647 self.pool_size = pool_size 3648 self.folder = folder 3649 self.db_codec = db_codec 3650 self._after_connection = after_connection 3651 self.find_or_make_work_folder() 3652 ruri = uri.split('://',1)[1] 3653 m = self.REGEX_URI.match(ruri) 3654 if not m: 3655 raise SyntaxError("Invalid URI string in DAL: %s" % self.uri) 3656 user = credential_decoder(m.group('user')) 3657 if not user: 3658 raise SyntaxError('User required') 3659 password = credential_decoder(m.group('password')) 3660 if not password: 3661 password = '' 3662 host = m.group('host') 3663 if not host: 3664 raise SyntaxError('Host name required') 3665 port = int(m.group('port') or 3050) 3666 db = m.group('db') 3667 if not db: 3668 raise SyntaxError('Database name required') 3669 charset = m.group('charset') or 'UTF8' 3670 driver_args.update(dsn='%s/%s:%s' % (host,port,db), 3671 user = credential_decoder(user), 3672 password = credential_decoder(password), 3673 charset = charset) 3674 3675 def connector(driver_args=driver_args): 3676 return self.driver.connect(**driver_args)
3677 self.connector = connector 3678 if do_connect: self.reconnect()
3679
3680 - def create_sequence_and_triggers(self, query, table, **args):
3681 tablename = table._tablename 3682 sequence_name = table._sequence_name 3683 trigger_name = table._trigger_name 3684 self.execute(query) 3685 self.execute('create generator %s;' % sequence_name) 3686 self.execute('set generator %s to 0;' % sequence_name) 3687 self.execute('create trigger %s for %s active before insert position 0 as\nbegin\nif(new.id is null) then\nbegin\nnew.id = gen_id(%s, 1);\nend\nend;' % (trigger_name, tablename, sequence_name))
3688
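Firebird of this vintage has no autoincrement columns, so id generation is wired up once at table-creation time: a generator plus a BEFORE INSERT trigger. For a hypothetical table named thing, the one-line trigger source above reads, reformatted:

create trigger trg_id_thing for thing active before insert position 0 as
begin
if(new.id is null) then
begin
new.id = gen_id(genid_thing, 1);
end
end;

so every INSERT that omits id draws the next value from the genid_thing generator.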
3689 - def lastrowid(self,table):
3690 sequence_name = table._sequence_name 3691 self.execute('SELECT gen_id(%s, 0) FROM rdb$database' % sequence_name) 3692 return long(self.cursor.fetchone()[0])
3693
3694 3695 -class FireBirdEmbeddedAdapter(FireBirdAdapter):
3696 drivers = ('kinterbasdb','firebirdsql','fdb','pyodbc') 3697 3698 REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<path>[^\?]+)(\?set_encoding=(?P<charset>\w+))?$') 3699
3700 - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', 3701 credential_decoder=IDENTITY, driver_args={}, 3702 adapter_args={}, do_connect=True, after_connection=None):
3703 self.db = db 3704 self.dbengine = "firebird" 3705 self.uri = uri 3706 if do_connect: self.find_driver(adapter_args,uri) 3707 self.pool_size = pool_size 3708 self.folder = folder 3709 self.db_codec = db_codec 3710 self._after_connection = after_connection 3711 self.find_or_make_work_folder() 3712 ruri = uri.split('://',1)[1] 3713 m = self.REGEX_URI.match(ruri) 3714 if not m: 3715 raise SyntaxError( 3716 "Invalid URI string in DAL: %s" % self.uri) 3717 user = credential_decoder(m.group('user')) 3718 if not user: 3719 raise SyntaxError('User required') 3720 password = credential_decoder(m.group('password')) 3721 if not password: 3722 password = '' 3723 pathdb = m.group('path') 3724 if not pathdb: 3725 raise SyntaxError('Path required') 3726 charset = m.group('charset') 3727 if not charset: 3728 charset = 'UTF8' 3729 host = '' 3730 driver_args.update(host=host, 3731 database=pathdb, 3732 user=credential_decoder(user), 3733 password=credential_decoder(password), 3734 charset=charset) 3735 3736 def connector(driver_args=driver_args): 3737 return self.driver.connect(**driver_args)
3738 self.connector = connector 3739 if do_connect: self.reconnect()
3740
3741 -class InformixAdapter(BaseAdapter):
3742 drivers = ('informixdb',) 3743 3744 types = { 3745 'boolean': 'CHAR(1)', 3746 'string': 'VARCHAR(%(length)s)', 3747 'text': 'BLOB SUB_TYPE 1', 3748 'json': 'BLOB SUB_TYPE 1', 3749 'password': 'VARCHAR(%(length)s)', 3750 'blob': 'BLOB SUB_TYPE 0', 3751 'upload': 'VARCHAR(%(length)s)', 3752 'integer': 'INTEGER', 3753 'bigint': 'BIGINT', 3754 'float': 'FLOAT', 3755 'double': 'DOUBLE PRECISION', 3756 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', 3757 'date': 'DATE', 3758 'time': 'CHAR(8)', 3759 'datetime': 'DATETIME', 3760 'id': 'SERIAL', 3761 'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 3762 'list:integer': 'BLOB SUB_TYPE 1', 3763 'list:string': 'BLOB SUB_TYPE 1', 3764 'list:reference': 'BLOB SUB_TYPE 1', 3765 'big-id': 'BIGSERIAL', 3766 'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 3767 'reference FK': 'REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s CONSTRAINT FK_%(table_name)s_%(field_name)s', 3768 'reference TFK': 'FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s CONSTRAINT TFK_%(table_name)s_%(field_name)s', 3769 } 3770
3771 - def RANDOM(self):
3772 return 'Random()'
3773
3774 - def NOT_NULL(self,default,field_type):
3775 return 'DEFAULT %s NOT NULL' % self.represent(default,field_type)
3776
3777 - def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
3778 if limitby: 3779 (lmin, lmax) = limitby 3780 fetch_amt = lmax - lmin 3781 dbms_version = int(self.connection.dbms_version.split('.')[0]) 3782 if lmin and (dbms_version >= 10): 3783 # Requires Informix 10.0+ 3784 sql_s += ' SKIP %d' % (lmin, ) 3785 if fetch_amt and (dbms_version >= 9): 3786 # Requires Informix 9.0+ 3787 sql_s += ' FIRST %d' % (fetch_amt, ) 3788 return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
3789
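Informix attaches the slice to the projection rather than a trailing LIMIT clause, and only emits each modifier when self.connection.dbms_version allows it: SKIP needs 10.0+, FIRST needs 9.0+. On a new enough server, limitby=(10, 20) against a hypothetical person table roughly becomes:

SELECT SKIP 10 FIRST 10 person.name FROM person;

On older servers the unsupported modifiers are simply omitted.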
3790 - def represent_exceptions(self, obj, fieldtype):
3791 if fieldtype == 'date': 3792 if isinstance(obj, (datetime.date, datetime.datetime)): 3793 obj = obj.isoformat()[:10] 3794 else: 3795 obj = str(obj) 3796 return "to_date('%s','%%Y-%%m-%%d')" % obj 3797 elif fieldtype == 'datetime': 3798 if isinstance(obj, datetime.datetime): 3799 obj = obj.isoformat()[:19].replace('T',' ') 3800 elif isinstance(obj, datetime.date): 3801 obj = obj.isoformat()[:10]+' 00:00:00' 3802 else: 3803 obj = str(obj) 3804 return "to_date('%s','%%Y-%%m-%%d %%H:%%M:%%S')" % obj 3805 return None
3806 3807 REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+)$') 3808
3809 - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', 3810 credential_decoder=IDENTITY, driver_args={}, 3811 adapter_args={}, do_connect=True, after_connection=None):
3812 self.db = db 3813 self.dbengine = "informix" 3814 self.uri = uri 3815 if do_connect: self.find_driver(adapter_args,uri) 3816 self.pool_size = pool_size 3817 self.folder = folder 3818 self.db_codec = db_codec 3819 self._after_connection = after_connection 3820 self.find_or_make_work_folder() 3821 ruri = uri.split('://',1)[1] 3822 m = self.REGEX_URI.match(ruri) 3823 if not m: 3824 raise SyntaxError( 3825 "Invalid URI string in DAL: %s" % self.uri) 3826 user = credential_decoder(m.group('user')) 3827 if not user: 3828 raise SyntaxError('User required') 3829 password = credential_decoder(m.group('password')) 3830 if not password: 3831 password = '' 3832 host = m.group('host') 3833 if not host: 3834 raise SyntaxError('Host name required') 3835 db = m.group('db') 3836 if not db: 3837 raise SyntaxError('Database name required') 3838 user = credential_decoder(user) 3839 password = credential_decoder(password) 3840 dsn = '%s@%s' % (db,host) 3841 driver_args.update(user=user,password=password,autocommit=True) 3842 def connector(dsn=dsn,driver_args=driver_args): 3843 return self.driver.connect(dsn,**driver_args)
3844 self.connector = connector 3845 if do_connect: self.reconnect()
3846
3847 - def execute(self,command):
3848 if command[-1:]==';': 3849 command = command[:-1] 3850 return self.log_execute(command)
3851
3852 - def lastrowid(self,table):
3853 return self.cursor.sqlerrd[1]
3854
3855 -class InformixSEAdapter(InformixAdapter):
3856 """ work in progress """ 3857
3858 - def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
3859 return 'SELECT %s %s FROM %s%s%s;' % \ 3860 (sql_s, sql_f, sql_t, sql_w, sql_o)
3861
3862 - def rowslice(self,rows,minimum=0,maximum=None):
3863 if maximum is None: 3864 return rows[minimum:] 3865 return rows[minimum:maximum]
3866
3867 -class DB2Adapter(BaseAdapter):
3868 drivers = ('pyodbc',) 3869 3870 types = { 3871 'boolean': 'CHAR(1)', 3872 'string': 'VARCHAR(%(length)s)', 3873 'text': 'CLOB', 3874 'json': 'CLOB', 3875 'password': 'VARCHAR(%(length)s)', 3876 'blob': 'BLOB', 3877 'upload': 'VARCHAR(%(length)s)', 3878 'integer': 'INT', 3879 'bigint': 'BIGINT', 3880 'float': 'REAL', 3881 'double': 'DOUBLE', 3882 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', 3883 'date': 'DATE', 3884 'time': 'TIME', 3885 'datetime': 'TIMESTAMP', 3886 'id': 'INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL', 3887 'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 3888 'list:integer': 'CLOB', 3889 'list:string': 'CLOB', 3890 'list:reference': 'CLOB', 3891 'big-id': 'BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL', 3892 'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 3893 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 3894 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', 3895 } 3896
3897 - def LEFT_JOIN(self):
3898 return 'LEFT OUTER JOIN'
3899
3900 - def RANDOM(self):
3901 return 'RAND()'
3902
3903 - def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
3904 if limitby: 3905 (lmin, lmax) = limitby 3906 sql_o += ' FETCH FIRST %i ROWS ONLY' % lmax 3907 return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
3908
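DB2's FETCH FIRST caps the row count but cannot skip leading rows, so the adapter over-fetches the first lmax rows and relies on the rowslice() method further below to drop the leading lmin rows client-side. limitby=(10, 20) against a hypothetical person table roughly becomes:

SELECT person.name FROM person FETCH FIRST 20 ROWS ONLY;

after which rowslice() trims the result down to the ten requested records.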
3909 - def represent_exceptions(self, obj, fieldtype):
3910 if fieldtype == 'blob': 3911 obj = base64.b64encode(str(obj)) 3912 return "BLOB('%s')" % obj 3913 elif fieldtype == 'datetime': 3914 if isinstance(obj, datetime.datetime): 3915 obj = obj.isoformat()[:19].replace('T','-').replace(':','.') 3916 elif isinstance(obj, datetime.date): 3917 obj = obj.isoformat()[:10]+'-00.00.00' 3918 return "'%s'" % obj 3919 return None
3920
3921 - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', 3922 credential_decoder=IDENTITY, driver_args={}, 3923 adapter_args={}, do_connect=True, after_connection=None):
3924 self.db = db 3925 self.dbengine = "db2" 3926 self.uri = uri 3927 if do_connect: self.find_driver(adapter_args,uri) 3928 self.pool_size = pool_size 3929 self.folder = folder 3930 self.db_codec = db_codec 3931 self._after_connection = after_connection 3932 self.find_or_make_work_folder() 3933 ruri = uri.split('://', 1)[1] 3934 def connector(cnxn=ruri,driver_args=driver_args): 3935 return self.driver.connect(cnxn,**driver_args)
3936 self.connector = connector 3937 if do_connect: self.reconnect()
3938
3939 - def execute(self,command):
3940 if command[-1:]==';': 3941 command = command[:-1] 3942 return self.log_execute(command)
3943
3944 - def lastrowid(self,table):
3945 self.execute('SELECT DISTINCT IDENTITY_VAL_LOCAL() FROM %s;' % table) 3946 return long(self.cursor.fetchone()[0])
3947
3948 - def rowslice(self,rows,minimum=0,maximum=None):
3949 if maximum is None: 3950 return rows[minimum:] 3951 return rows[minimum:maximum]
3952
3953 3954 -class TeradataAdapter(BaseAdapter):
3955 drivers = ('pyodbc',) 3956 3957 types = { 3958 'boolean': 'CHAR(1)', 3959 'string': 'VARCHAR(%(length)s)', 3960 'text': 'CLOB', 3961 'json': 'CLOB', 3962 'password': 'VARCHAR(%(length)s)', 3963 'blob': 'BLOB', 3964 'upload': 'VARCHAR(%(length)s)', 3965 'integer': 'INT', 3966 'bigint': 'BIGINT', 3967 'float': 'REAL', 3968 'double': 'DOUBLE', 3969 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', 3970 'date': 'DATE', 3971 'time': 'TIME', 3972 'datetime': 'TIMESTAMP', 3973 # Modified Constraint syntax for Teradata. 3974 # Teradata does not support ON DELETE. 3975 'id': 'INT GENERATED ALWAYS AS IDENTITY', # Teradata Specific 3976 'reference': 'INT', 3977 'list:integer': 'CLOB', 3978 'list:string': 'CLOB', 3979 'list:reference': 'CLOB', 3980 'big-id': 'BIGINT GENERATED ALWAYS AS IDENTITY', # Teradata Specific 3981 'big-reference': 'BIGINT', 3982 'reference FK': ' REFERENCES %(foreign_key)s', 3983 'reference TFK': ' FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s)', 3984 } 3985
3986 - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', 3987 credential_decoder=IDENTITY, driver_args={}, 3988 adapter_args={}, do_connect=True, after_connection=None):
3989 self.db = db 3990 self.dbengine = "teradata" 3991 self.uri = uri 3992 if do_connect: self.find_driver(adapter_args,uri) 3993 self.pool_size = pool_size 3994 self.folder = folder 3995 self.db_codec = db_codec 3996 self._after_connection = after_connection 3997 self.find_or_make_work_folder() 3998 ruri = uri.split('://', 1)[1] 3999 def connector(cnxn=ruri,driver_args=driver_args): 4000 return self.driver.connect(cnxn,**driver_args)
4001 self.connector = connector 4002 if do_connect: self.reconnect()
4003
4004 - def LEFT_JOIN(self):
4005 return 'LEFT OUTER JOIN'
4006 4007 # Similar to MSSQL, Teradata can't specify a range when paging (TOP only, no offset)
4008 - def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
4009 if limitby: 4010 (lmin, lmax) = limitby 4011 sql_s += ' TOP %i' % lmax 4012 return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
4013
4014 - def _truncate(self, table, mode=''):
4015 tablename = table._tablename 4016 return ['DELETE FROM %s ALL;' % (tablename)]
4017 4018 INGRES_SEQNAME='ii***lineitemsequence' # NOTE invalid database object name
4019 # (ANSI-SQL wants this form of name 4020 # to be a delimited identifier) 4021 4022 -class IngresAdapter(BaseAdapter):
4023 drivers = ('pyodbc',) 4024 4025 types = { 4026 'boolean': 'CHAR(1)', 4027 'string': 'VARCHAR(%(length)s)', 4028 'text': 'CLOB', 4029 'json': 'CLOB', 4030 'password': 'VARCHAR(%(length)s)', ## Not sure what this contains utf8 or nvarchar. Or even bytes? 4031 'blob': 'BLOB', 4032 'upload': 'VARCHAR(%(length)s)', ## FIXME utf8 or nvarchar... or blob? what is this type? 4033 'integer': 'INTEGER4', # or int8... 4034 'bigint': 'BIGINT', 4035 'float': 'FLOAT', 4036 'double': 'FLOAT8', 4037 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', 4038 'date': 'ANSIDATE', 4039 'time': 'TIME WITHOUT TIME ZONE', 4040 'datetime': 'TIMESTAMP WITHOUT TIME ZONE', 4041 'id': 'int not null unique with default next value for %s' % INGRES_SEQNAME, 4042 'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 4043 'list:integer': 'CLOB', 4044 'list:string': 'CLOB', 4045 'list:reference': 'CLOB', 4046 'big-id': 'bigint not null unique with default next value for %s' % INGRES_SEQNAME, 4047 'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 4048 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 4049 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', ## FIXME TODO 4050 } 4051
4052 - def LEFT_JOIN(self):
4053 return 'LEFT OUTER JOIN'
4054
4055 - def RANDOM(self):
4056 return 'RANDOM()'
4057
4058 - def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
4059 if limitby: 4060 (lmin, lmax) = limitby 4061 fetch_amt = lmax - lmin 4062 if fetch_amt: 4063 sql_s += ' FIRST %d ' % (fetch_amt, ) 4064 if lmin: 4065 # Requires Ingres 9.2+ 4066 sql_o += ' OFFSET %d' % (lmin, ) 4067 return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
4068
4069 - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', 4070 credential_decoder=IDENTITY, driver_args={}, 4071 adapter_args={}, do_connect=True, after_connection=None):
4072 self.db = db 4073 self.dbengine = "ingres" 4074 self._driver = pyodbc 4075 self.uri = uri 4076 if do_connect: self.find_driver(adapter_args,uri) 4077 self.pool_size = pool_size 4078 self.folder = folder 4079 self.db_codec = db_codec 4080 self._after_connection = after_connection 4081 self.find_or_make_work_folder() 4082 connstr = uri.split(':', 1)[1] 4083 # Simple URI processing 4084 connstr = connstr.lstrip() 4085 while connstr.startswith('/'): 4086 connstr = connstr[1:] 4087 if '=' in connstr: 4088 # Assume we have a regular ODBC connection string and just use it 4089 ruri = connstr 4090 else: 4091 # Assume only (local) dbname is passed in with OS auth 4092 database_name = connstr 4093 default_driver_name = 'Ingres' 4094 vnode = '(local)' 4095 servertype = 'ingres' 4096 ruri = 'Driver={%s};Server=%s;Database=%s' % (default_driver_name, vnode, database_name) 4097 def connector(cnxn=ruri,driver_args=driver_args): 4098 return self.driver.connect(cnxn,**driver_args)
4099 4100 self.connector = connector 4101 4102 # TODO if version is >= 10, set types['id'] to Identity column, see http://community.actian.com/wiki/Using_Ingres_Identity_Columns 4103 if do_connect: self.reconnect()
4104
4105 - def create_sequence_and_triggers(self, query, table, **args):
4106 # post create table auto inc code (if needed) 4107 # modify table to btree for performance.... 4108 # Older Ingres releases could use rule/trigger like Oracle above. 4109 if hasattr(table,'_primarykey'): 4110 modify_tbl_sql = 'modify %s to btree unique on %s' % \ 4111 (table._tablename, 4112 ', '.join(["'%s'" % x for x in table.primarykey])) 4113 self.execute(modify_tbl_sql) 4114 else: 4115 tmp_seqname='%s_iisq' % table._tablename 4116 query=query.replace(INGRES_SEQNAME, tmp_seqname) 4117 self.execute('create sequence %s' % tmp_seqname) 4118 self.execute(query) 4119 self.execute('modify %s to btree unique on %s' % (table._tablename, 'id'))
4120 4121
4122 - def lastrowid(self,table):
4123 tmp_seqname='%s_iisq' % table 4124 self.execute('select current value for %s' % tmp_seqname) 4125 return long(self.cursor.fetchone()[0]) # don't really need int type cast here...
4126
4127 4128 -class IngresUnicodeAdapter(IngresAdapter):
4129 4130 drivers = ('pyodbc',) 4131 4132 types = { 4133 'boolean': 'CHAR(1)', 4134 'string': 'NVARCHAR(%(length)s)', 4135 'text': 'NCLOB', 4136 'json': 'NCLOB', 4137 'password': 'NVARCHAR(%(length)s)', ## Not sure what this contains utf8 or nvarchar. Or even bytes? 4138 'blob': 'BLOB', 4139 'upload': 'VARCHAR(%(length)s)', ## FIXME utf8 or nvarchar... or blob? what is this type? 4140 'integer': 'INTEGER4', # or int8... 4141 'bigint': 'BIGINT', 4142 'float': 'FLOAT', 4143 'double': 'FLOAT8', 4144 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', 4145 'date': 'ANSIDATE', 4146 'time': 'TIME WITHOUT TIME ZONE', 4147 'datetime': 'TIMESTAMP WITHOUT TIME ZONE', 4148 'id': 'INTEGER4 not null unique with default next value for %s'% INGRES_SEQNAME, 4149 'reference': 'INTEGER4, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 4150 'list:integer': 'NCLOB', 4151 'list:string': 'NCLOB', 4152 'list:reference': 'NCLOB', 4153 'big-id': 'BIGINT not null unique with default next value for %s'% INGRES_SEQNAME, 4154 'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 4155 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 4156 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', ## FIXME TODO 4157 }
4158
4159 -class SAPDBAdapter(BaseAdapter):
4160 drivers = ('sapdb',) 4161 4162 support_distributed_transaction = False 4163 types = { 4164 'boolean': 'CHAR(1)', 4165 'string': 'VARCHAR(%(length)s)', 4166 'text': 'LONG', 4167 'json': 'LONG', 4168 'password': 'VARCHAR(%(length)s)', 4169 'blob': 'LONG', 4170 'upload': 'VARCHAR(%(length)s)', 4171 'integer': 'INT', 4172 'bigint': 'BIGINT', 4173 'float': 'FLOAT', 4174 'double': 'DOUBLE PRECISION', 4175 'decimal': 'FIXED(%(precision)s,%(scale)s)', 4176 'date': 'DATE', 4177 'time': 'TIME', 4178 'datetime': 'TIMESTAMP', 4179 'id': 'INT PRIMARY KEY', 4180 'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 4181 'list:integer': 'LONG', 4182 'list:string': 'LONG', 4183 'list:reference': 'LONG', 4184 'big-id': 'BIGINT PRIMARY KEY', 4185 'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 4186 } 4187
4188 - def sequence_name(self,table):
4189 return '%s_id_Seq' % table
4190
4191 - def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
4192 if limitby: 4193 (lmin, lmax) = limitby 4194 if len(sql_w) > 1: 4195 sql_w_row = sql_w + ' AND w_row > %i' % lmin 4196 else: 4197 sql_w_row = 'WHERE w_row > %i' % lmin 4198 return '%s %s FROM (SELECT w_tmp.*, ROWNO w_row FROM (SELECT %s FROM %s%s%s) w_tmp WHERE ROWNO=%i) %s %s %s;' % (sql_s, sql_f, sql_f, sql_t, sql_w, sql_o, lmax, sql_t, sql_w_row, sql_o) 4199 return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
4200
4201 - def create_sequence_and_triggers(self, query, table, **args):
4202 # following lines should only be executed if table._sequence_name does not exist 4203 self.execute('CREATE SEQUENCE %s;' % table._sequence_name) 4204 self.execute("ALTER TABLE %s ALTER COLUMN %s SET DEFAULT NEXTVAL('%s');" \ 4205 % (table._tablename, table._id.name, table._sequence_name)) 4206 self.execute(query)
4207 4208 REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:@]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?sslmode=(?P<sslmode>.+))?$') 4209 4210
4211 - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', 4212 credential_decoder=IDENTITY, driver_args={}, 4213 adapter_args={}, do_connect=True, after_connection=None):
4214 self.db = db 4215 self.dbengine = "sapdb" 4216 self.uri = uri 4217 if do_connect: self.find_driver(adapter_args,uri) 4218 self.pool_size = pool_size 4219 self.folder = folder 4220 self.db_codec = db_codec 4221 self._after_connection = after_connection 4222 self.find_or_make_work_folder() 4223 ruri = uri.split('://',1)[1] 4224 m = self.REGEX_URI.match(ruri) 4225 if not m: 4226 raise SyntaxError("Invalid URI string in DAL") 4227 user = credential_decoder(m.group('user')) 4228 if not user: 4229 raise SyntaxError('User required') 4230 password = credential_decoder(m.group('password')) 4231 if not password: 4232 password = '' 4233 host = m.group('host') 4234 if not host: 4235 raise SyntaxError('Host name required') 4236 db = m.group('db') 4237 if not db: 4238 raise SyntaxError('Database name required') 4239 def connector(user=user, password=password, database=db, 4240 host=host, driver_args=driver_args): 4241 return self.driver.Connection(user, password, database, 4242 host, **driver_args)
4243 self.connector = connector 4244 if do_connect: self.reconnect()
4245
4246 - def lastrowid(self,table):
4247 self.execute("select %s.NEXTVAL from dual" % table._sequence_name) 4248 return long(self.cursor.fetchone()[0])
4249
4250 -class CubridAdapter(MySQLAdapter):
4251 drivers = ('cubriddb',) 4252 4253 REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^?]+)(\?set_encoding=(?P<charset>\w+))?$') 4254
4255 - def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8', 4256 credential_decoder=IDENTITY, driver_args={}, 4257 adapter_args={}, do_connect=True, after_connection=None):
4258 self.db = db 4259 self.dbengine = "cubrid" 4260 self.uri = uri 4261 if do_connect: self.find_driver(adapter_args,uri) 4262 self.pool_size = pool_size 4263 self.folder = folder 4264 self.db_codec = db_codec 4265 self._after_connection = after_connection 4266 self.find_or_make_work_folder() 4267 ruri = uri.split('://',1)[1] 4268 m = self.REGEX_URI.match(ruri) 4269 if not m: 4270 raise SyntaxError( 4271 "Invalid URI string in DAL: %s" % self.uri) 4272 user = credential_decoder(m.group('user')) 4273 if not user: 4274 raise SyntaxError('User required') 4275 password = credential_decoder(m.group('password')) 4276 if not password: 4277 password = '' 4278 host = m.group('host') 4279 if not host: 4280 raise SyntaxError('Host name required') 4281 db = m.group('db') 4282 if not db: 4283 raise SyntaxError('Database name required') 4284 port = int(m.group('port') or '30000') 4285 charset = m.group('charset') or 'utf8' 4286 user = credential_decoder(user) 4287 passwd = credential_decoder(password) 4288 def connector(host=host,port=port,db=db, 4289 user=user,passwd=passwd,driver_args=driver_args): 4290 return self.driver.connect(host,port,db,user,passwd,**driver_args)
4291 self.connector = connector 4292 if do_connect: self.reconnect()
4293
4294 - def after_connection(self):
4295 self.execute('SET FOREIGN_KEY_CHECKS=1;') 4296 self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';")
4297
4298 4299 ######## GAE MySQL ########## 4300 4301 -class DatabaseStoredFile:
4302 4303 web2py_filesystem = False 4304
4305 - def escape(self,obj):
4306 return self.db._adapter.escape(obj)
4307
4308 - def __init__(self,db,filename,mode):
4309 if not db._adapter.dbengine in ('mysql', 'postgres', 'sqlite'): 4310 raise RuntimeError("only MySQL/Postgres/SQLite can store metadata .table files in database for now") 4311 self.db = db 4312 self.filename = filename 4313 self.mode = mode 4314 if not self.web2py_filesystem: 4315 if db._adapter.dbengine == 'mysql': 4316 sql = "CREATE TABLE IF NOT EXISTS web2py_filesystem (path VARCHAR(255), content LONGTEXT, PRIMARY KEY(path) ) ENGINE=InnoDB;" 4317 elif db._adapter.dbengine in ('postgres', 'sqlite'): 4318 sql = "CREATE TABLE IF NOT EXISTS web2py_filesystem (path VARCHAR(255), content TEXT, PRIMARY KEY(path));" 4319 self.db.executesql(sql) 4320 DatabaseStoredFile.web2py_filesystem = True 4321 self.p=0 4322 self.data = '' 4323 if mode in ('r','rw','a'): 4324 query = "SELECT content FROM web2py_filesystem WHERE path='%s'" \ 4325 % filename 4326 rows = self.db.executesql(query) 4327 if rows: 4328 self.data = rows[0][0] 4329 elif exists(filename): 4330 datafile = open(filename, 'r') 4331 try: 4332 self.data = datafile.read() 4333 finally: 4334 datafile.close() 4335 elif mode in ('r','rw'): 4336 raise RuntimeError("File %s does not exist" % filename)
4337
4338 - def read(self, bytes):
4339 data = self.data[self.p:self.p+bytes] 4340 self.p += len(data) 4341 return data
4342
4343 - def readline(self):
4344 i = self.data.find('\n',self.p)+1 4345 if i>0: 4346 data, self.p = self.data[self.p:i], i 4347 else: 4348 data, self.p = self.data[self.p:], len(self.data) 4349 return data
4350
4351 - def write(self,data):
4352 self.data += data
4353
4354 - def close_connection(self):
4355 if self.db is not None: 4356 self.db.executesql( 4357 "DELETE FROM web2py_filesystem WHERE path='%s'" % self.filename) 4358 query = "INSERT INTO web2py_filesystem(path,content) VALUES ('%s','%s')"\ 4359 % (self.filename, self.data.replace("'","''")) 4360 self.db.executesql(query) 4361 self.db.commit() 4362 self.db = None
4363
4364 - def close(self):
4365 self.close_connection()
4366 4367 @staticmethod
4368 - def exists(db, filename):
4369 if exists(filename): 4370 return True 4371 query = "SELECT path FROM web2py_filesystem WHERE path='%s'" % filename 4372 try: 4373 if db.executesql(query): 4374 return True 4375 except Exception, e: 4376 if not (db._adapter.isOperationalError(e) or 4377 db._adapter.isProgrammingError(e)): 4378 raise 4379 # no web2py_filesystem found? 4380 tb = traceback.format_exc() 4381 LOGGER.error("Could not retrieve %s\n%s" % (filename, tb)) 4382 return False
4383
4384 4385 -class UseDatabaseStoredFile:
4386
4387 - def file_exists(self, filename):
4388 return DatabaseStoredFile.exists(self.db,filename)
4389
4390 - def file_open(self, filename, mode='rb', lock=True):
4391 return DatabaseStoredFile(self.db,filename,mode)
4392
4393 - def file_close(self, fileobj):
4394 fileobj.close_connection()
4395
4396 - def file_delete(self,filename):
4397 query = "DELETE FROM web2py_filesystem WHERE path='%s'" % filename 4398 self.db.executesql(query) 4399 self.db.commit()
4400
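Together these helpers let an adapter swap web2py's on-disk *.table migration files for rows in a single web2py_filesystem table, which is why GoogleSQLAdapter below mixes in UseDatabaseStoredFile: App Engine deployments have no writable filesystem. A hedged usage sketch:

>>> f = DatabaseStoredFile(db, 'sample.table', 'w')      # doctest: +SKIP
>>> f.write('serialized migration metadata')
>>> f.close()   # DELETEs any old row for this path, INSERTs the content, commits
>>> DatabaseStoredFile.exists(db, 'sample.table')        # doctest: +SKIP
True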
4401 -class GoogleSQLAdapter(UseDatabaseStoredFile,MySQLAdapter):
4402 uploads_in_blob = True 4403 4404 REGEX_URI = re.compile('^(?P<instance>.*)/(?P<db>.*)$') 4405
4406 - def __init__(self, db, uri='google:sql://realm:domain/database', 4407 pool_size=0, folder=None, db_codec='UTF-8', 4408 credential_decoder=IDENTITY, driver_args={}, 4409 adapter_args={}, do_connect=True, after_connection=None):
4410 4411 self.db = db 4412 self.dbengine = "mysql" 4413 self.uri = uri 4414 self.pool_size = pool_size 4415 self.db_codec = db_codec 4416 self._after_connection = after_connection 4417 if do_connect: self.find_driver(adapter_args, uri) 4418 self.folder = folder or pjoin('$HOME',THREAD_LOCAL.folder.split( 4419 os.sep+'applications'+os.sep,1)[1]) 4420 ruri = uri.split("://")[1] 4421 m = self.REGEX_URI.match(ruri) 4422 if not m: 4423 raise SyntaxError("Invalid URI string in SQLDB: %s" % self.uri) 4424 instance = credential_decoder(m.group('instance')) 4425 self.dbstring = db = credential_decoder(m.group('db')) 4426 driver_args['instance'] = instance 4427 if not 'charset' in driver_args: 4428 driver_args['charset'] = 'utf8' 4429 self.createdb = createdb = adapter_args.get('createdb',True) 4430 if not createdb: 4431 driver_args['database'] = db 4432 def connector(driver_args=driver_args): 4433 return rdbms.connect(**driver_args)
4434 self.connector = connector 4435 if do_connect: self.reconnect()
4436
4437 - def after_connection(self):
4438 if self.createdb: 4439 # self.execute('DROP DATABASE %s' % self.dbstring) 4440 self.execute('CREATE DATABASE IF NOT EXISTS %s' % self.dbstring) 4441 self.execute('USE %s' % self.dbstring) 4442 self.execute("SET FOREIGN_KEY_CHECKS=1;") 4443 self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';")
4444
4445 - def execute(self, command, *a, **b):
4446 return self.log_execute(command.decode('utf8'), *a, **b)
4447
4448 - def find_driver(self,adapter_args,uri=None):
4449 self.adapter_args = adapter_args 4450 self.driver = "google"
4451
4452 -class NoSQLAdapter(BaseAdapter):
4453 can_select_for_update = False 4454 4455 @staticmethod
4456 - def to_unicode(obj):
4457 if isinstance(obj, str): 4458 return obj.decode('utf8') 4459 elif not isinstance(obj, unicode): 4460 return unicode(obj) 4461 return obj
4462
4463 - def id_query(self, table):
4464 return table._id > 0
4465
4466 - def represent(self, obj, fieldtype):
4467 field_is_type = fieldtype.startswith 4468 if isinstance(obj, CALLABLETYPES): 4469 obj = obj() 4470 if isinstance(fieldtype, SQLCustomType): 4471 return fieldtype.encoder(obj) 4472 if isinstance(obj, (Expression, Field)): 4473 raise SyntaxError("not supported on GAE") 4474 if self.dbengine == 'google:datastore': 4475 if isinstance(fieldtype, gae.Property): 4476 return obj 4477 is_string = isinstance(fieldtype,str) 4478 is_list = is_string and field_is_type('list:') 4479 if is_list: 4480 if not obj: 4481 obj = [] 4482 if not isinstance(obj, (list, tuple)): 4483 obj = [obj] 4484 if obj == '' and not \ 4485 (is_string and fieldtype[:2] in ['st','te', 'pa','up']): 4486 return None 4487 if not obj is None: 4488 if isinstance(obj, list) and not is_list: 4489 obj = [self.represent(o, fieldtype) for o in obj] 4490 elif fieldtype in ('integer','bigint','id'): 4491 obj = long(obj) 4492 elif fieldtype == 'double': 4493 obj = float(obj) 4494 elif is_string and field_is_type('reference'): 4495 if isinstance(obj, (Row, Reference)): 4496 obj = obj['id'] 4497 obj = long(obj) 4498 elif fieldtype == 'boolean': 4499 if obj and not str(obj)[0].upper() in '0F': 4500 obj = True 4501 else: 4502 obj = False 4503 elif fieldtype == 'date': 4504 if not isinstance(obj, datetime.date): 4505 (y, m, d) = map(int,str(obj).strip().split('-')) 4506 obj = datetime.date(y, m, d) 4507 elif isinstance(obj,datetime.datetime): 4508 (y, m, d) = (obj.year, obj.month, obj.day) 4509 obj = datetime.date(y, m, d) 4510 elif fieldtype == 'time': 4511 if not isinstance(obj, datetime.time): 4512 time_items = map(int,str(obj).strip().split(':')[:3]) 4513 if len(time_items) == 3: 4514 (h, mi, s) = time_items 4515 else: 4516 (h, mi, s) = time_items + [0] 4517 obj = datetime.time(h, mi, s) 4518 elif fieldtype == 'datetime': 4519 if not isinstance(obj, datetime.datetime): 4520 (y, m, d) = map(int,str(obj)[:10].strip().split('-')) 4521 time_items = map(int,str(obj)[11:].strip().split(':')[:3]) 4522 while len(time_items)<3: 4523 time_items.append(0) 4524 (h, mi, s) = time_items 4525 obj = datetime.datetime(y, m, d, h, mi, s) 4526 elif fieldtype == 'blob': 4527 pass 4528 elif fieldtype == 'json': 4529 if isinstance(obj, basestring): 4530 obj = self.to_unicode(obj) 4531 if have_serializers: 4532 obj = serializers.loads_json(obj) 4533 elif simplejson: 4534 obj = simplejson.loads(obj) 4535 else: 4536 raise RuntimeError("missing simplejson") 4537 elif is_string and field_is_type('list:string'): 4538 return map(self.to_unicode,obj) 4539 elif is_list: 4540 return map(int,obj) 4541 else: 4542 obj = self.to_unicode(obj) 4543 return obj
4544
4545 - def _insert(self,table,fields):
4546 return 'insert %s in %s' % (fields, table)
4547
4548 - def _count(self,query,distinct=None):
4549 return 'count %s' % repr(query)
4550
4551 - def _select(self,query,fields,attributes):
4552 return 'select %s where %s' % (repr(fields), repr(query))
4553
4554 - def _delete(self,tablename, query):
4555 return 'delete %s where %s' % (repr(tablename),repr(query))
4556
4557 - def _update(self,tablename,query,fields):
4558 return 'update %s (%s) where %s' % (repr(tablename), 4559 repr(fields),repr(query))
4560
4561 - def commit(self):
4562 """ 4563 remember: no transactions on many NoSQL 4564 """ 4565 pass
4566
4567 - def rollback(self):
4568 """ 4569 remember: no transactions on many NoSQL 4570 """ 4571 pass
4572
4573 - def close_connection(self):
4574 """ 4575 remember: no transactions on many NoSQL 4576 """ 4577 pass
4578 4579 4580 # these functions should never be called!
4581 - def OR(self,first,second): raise SyntaxError("Not supported")
4582 - def AND(self,first,second): raise SyntaxError("Not supported")
4583 - def AS(self,first,second): raise SyntaxError("Not supported")
4584 - def ON(self,first,second): raise SyntaxError("Not supported")
4585 - def STARTSWITH(self,first,second=None): raise SyntaxError("Not supported")
4586 - def ENDSWITH(self,first,second=None): raise SyntaxError("Not supported")
4587 - def ADD(self,first,second): raise SyntaxError("Not supported")
4588 - def SUB(self,first,second): raise SyntaxError("Not supported")
4589 - def MUL(self,first,second): raise SyntaxError("Not supported")
4590 - def DIV(self,first,second): raise SyntaxError("Not supported")
4591 - def LOWER(self,first): raise SyntaxError("Not supported")
4592 - def UPPER(self,first): raise SyntaxError("Not supported")
4593 - def EXTRACT(self,first,what): raise SyntaxError("Not supported")
4594 - def LENGTH(self, first): raise SyntaxError("Not supported")
4595 - def AGGREGATE(self,first,what): raise SyntaxError("Not supported")
4596 - def LEFT_JOIN(self): raise SyntaxError("Not supported")
4597 - def RANDOM(self): raise SyntaxError("Not supported")
4598 - def SUBSTRING(self,field,parameters): raise SyntaxError("Not supported")
4599 - def PRIMARY_KEY(self,key): raise SyntaxError("Not supported")
4600 - def ILIKE(self,first,second): raise SyntaxError("Not supported")
4601 - def drop(self,table,mode): raise SyntaxError("Not supported")
4602 - def alias(self,table,alias): raise SyntaxError("Not supported")
4603 - def migrate_table(self,*a,**b): raise SyntaxError("Not supported")
4604 - def distributed_transaction_begin(self,key): raise SyntaxError("Not supported")
4605 - def prepare(self,key): raise SyntaxError("Not supported")
4606 - def commit_prepared(self,key): raise SyntaxError("Not supported")
4607 - def rollback_prepared(self,key): raise SyntaxError("Not supported")
4608 - def concat_add(self,table): raise SyntaxError("Not supported")
4609 - def constraint_name(self, table, fieldname): raise SyntaxError("Not supported")
4610 - def create_sequence_and_triggers(self, query, table, **args): pass
4611 - def log_execute(self,*a,**b): raise SyntaxError("Not supported")
4612 - def execute(self,*a,**b): raise SyntaxError("Not supported")
4613 - def represent_exceptions(self, obj, fieldtype): raise SyntaxError("Not supported")
4614 - def lastrowid(self,table): raise SyntaxError("Not supported")
4615 - def rowslice(self,rows,minimum=0,maximum=None): raise SyntaxError("Not supported")
4616
4617 4618 -class GAEF(object):
4619 - def __init__(self,name,op,value,apply):
4620 self.name=name=='id' and '__key__' or name 4621 self.op=op 4622 self.value=value 4623 self.apply=apply
4624 - def __repr__(self):
4625 return '(%s %s %s:%s)' % (self.name, self.op, repr(self.value), type(self.value))
4626
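A GAEF is not an SQL fragment: it packs a datastore filter (property name, operator, value) together with a pure-Python fallback apply, used by select_raw when the candidate set has already been narrowed to an in-memory list. A hedged sketch, with adapter a GoogleDatastoreAdapter from below and person.name an ordinary string field:

>>> adapter.EQ(person.name, 'James')     # doctest: +SKIP
[(name = u'James':<type 'unicode'>)]

Filters on an 'id' field are translated to '__key__' comparisons instead.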
4627 -class GoogleDatastoreAdapter(NoSQLAdapter):
4628 uploads_in_blob = True 4629 types = {} 4630
4631 - def file_exists(self, filename): pass
4632 - def file_open(self, filename, mode='rb', lock=True): pass
4633 - def file_close(self, fileobj): pass
4634 4635 REGEX_NAMESPACE = re.compile('.*://(?P<namespace>.+)') 4636
4637 - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', 4638 credential_decoder=IDENTITY, driver_args={}, 4639 adapter_args={}, do_connect=True, after_connection=None):
4640 self.types.update({ 4641 'boolean': gae.BooleanProperty, 4642 'string': (lambda **kwargs: gae.StringProperty(multiline=True, **kwargs)), 4643 'text': gae.TextProperty, 4644 'json': gae.TextProperty, 4645 'password': gae.StringProperty, 4646 'blob': gae.BlobProperty, 4647 'upload': gae.StringProperty, 4648 'integer': gae.IntegerProperty, 4649 'bigint': gae.IntegerProperty, 4650 'float': gae.FloatProperty, 4651 'double': gae.FloatProperty, 4652 'decimal': GAEDecimalProperty, 4653 'date': gae.DateProperty, 4654 'time': gae.TimeProperty, 4655 'datetime': gae.DateTimeProperty, 4656 'id': None, 4657 'reference': gae.IntegerProperty, 4658 'list:string': (lambda **kwargs: gae.StringListProperty(default=None, **kwargs)), 4659 'list:integer': (lambda **kwargs: gae.ListProperty(int,default=None, **kwargs)), 4660 'list:reference': (lambda **kwargs: gae.ListProperty(int,default=None, **kwargs)), 4661 }) 4662 self.db = db 4663 self.uri = uri 4664 self.dbengine = 'google:datastore' 4665 self.folder = folder 4666 db['_lastsql'] = '' 4667 self.db_codec = 'UTF-8' 4668 self._after_connection = after_connection 4669 self.pool_size = 0 4670 match = self.REGEX_NAMESPACE.match(uri) 4671 if match: 4672 namespace_manager.set_namespace(match.group('namespace'))
4673
4674 - def parse_id(self, value, field_type):
4675 return value
4676
4677 - def create_table(self,table,migrate=True,fake_migrate=False, polymodel=None):
4678 myfields = {} 4679 for field in table: 4680 if isinstance(polymodel,Table) and field.name in polymodel.fields(): 4681 continue 4682 attr = {} 4683 if isinstance(field.custom_qualifier, dict): 4684 # these are custom properties to add to the GAE field declaration 4685 attr = field.custom_qualifier 4686 field_type = field.type 4687 if isinstance(field_type, SQLCustomType): 4688 ftype = self.types[field_type.native or field_type.type](**attr) 4689 elif isinstance(field_type, gae.Property): 4690 ftype = field_type 4691 elif field_type.startswith('id'): 4692 continue 4693 elif field_type.startswith('decimal'): 4694 precision, scale = field_type[7:].strip('()').split(',') 4695 precision = int(precision) 4696 scale = int(scale) 4697 ftype = GAEDecimalProperty(precision, scale, **attr) 4698 elif field_type.startswith('reference'): 4699 if field.notnull: 4700 attr = dict(required=True) 4701 referenced = field_type[10:].strip() 4702 ftype = self.types[field_type[:9]](referenced, **attr) 4703 elif field_type.startswith('list:reference'): 4704 if field.notnull: 4705 attr['required'] = True 4706 referenced = field_type[15:].strip() 4707 ftype = self.types[field_type[:14]](**attr) 4708 elif field_type.startswith('list:'): 4709 ftype = self.types[field_type](**attr) 4710 elif not field_type in self.types\ 4711 or not self.types[field_type]: 4712 raise SyntaxError('Field: unknown field type: %s' % field_type) 4713 else: 4714 ftype = self.types[field_type](**attr) 4715 myfields[field.name] = ftype 4716 if not polymodel: 4717 table._tableobj = classobj(table._tablename, (gae.Model, ), myfields) 4718 elif polymodel==True: 4719 table._tableobj = classobj(table._tablename, (PolyModel, ), myfields) 4720 elif isinstance(polymodel,Table): 4721 table._tableobj = classobj(table._tablename, (polymodel._tableobj, ), myfields) 4722 else: 4723 raise SyntaxError("polymodel must be None, True, a table or a tablename") 4724 return None
4725
4726 - def expand(self,expression,field_type=None):
4727 if isinstance(expression,Field): 4728 if expression.type in ('text', 'blob', 'json'): 4729 raise SyntaxError('AppEngine does not index by: %s' % expression.type) 4730 return expression.name 4731 elif isinstance(expression, (Expression, Query)): 4732 if not expression.second is None: 4733 return expression.op(expression.first, expression.second) 4734 elif not expression.first is None: 4735 return expression.op(expression.first) 4736 else: 4737 return expression.op() 4738 elif field_type: 4739 return self.represent(expression,field_type) 4740 elif isinstance(expression,(list,tuple)): 4741 return ','.join([self.represent(item,field_type) for item in expression]) 4742 else: 4743 return str(expression)
4744 4745 ### TODO from gql.py Expression
4746 - def AND(self,first,second):
4747 a = self.expand(first) 4748 b = self.expand(second) 4749 if b[0].name=='__key__' and a[0].name!='__key__': 4750 return b+a 4751 return a+b
4752
4753 - def EQ(self,first,second=None):
4754 if isinstance(second, Key): 4755 return [GAEF(first.name,'=',second,lambda a,b:a==b)] 4756 return [GAEF(first.name,'=',self.represent(second,first.type),lambda a,b:a==b)]
4757
4758 - def NE(self,first,second=None):
4759 if first.type != 'id': 4760 return [GAEF(first.name,'!=',self.represent(second,first.type),lambda a,b:a!=b)] 4761 else: 4762 if not second is None: 4763 second = Key.from_path(first._tablename, long(second)) 4764 return [GAEF(first.name,'!=',second,lambda a,b:a!=b)]
4765
4766 - def LT(self,first,second=None):
4767 if first.type != 'id': 4768 return [GAEF(first.name,'<',self.represent(second,first.type),lambda a,b:a<b)] 4769 else: 4770 second = Key.from_path(first._tablename, long(second)) 4771 return [GAEF(first.name,'<',second,lambda a,b:a<b)]
4772
4773 - def LE(self,first,second=None):
4774 if first.type != 'id': 4775 return [GAEF(first.name,'<=',self.represent(second,first.type),lambda a,b:a<=b)] 4776 else: 4777 second = Key.from_path(first._tablename, long(second)) 4778 return [GAEF(first.name,'<=',second,lambda a,b:a<=b)]
4779
4780 - def GT(self,first,second=None):
4781 if first.type != 'id' or second==0 or second == '0': 4782 return [GAEF(first.name,'>',self.represent(second,first.type),lambda a,b:a>b)] 4783 else: 4784 second = Key.from_path(first._tablename, long(second)) 4785 return [GAEF(first.name,'>',second,lambda a,b:a>b)]
4786
4787 - def GE(self,first,second=None):
4788 if first.type != 'id': 4789 return [GAEF(first.name,'>=',self.represent(second,first.type),lambda a,b:a>=b)] 4790 else: 4791 second = Key.from_path(first._tablename, long(second)) 4792 return [GAEF(first.name,'>=',second,lambda a,b:a>=b)]
4793
4794 - def INVERT(self,first):
4795 return '-%s' % first.name
4796
4797 - def COMMA(self,first,second):
4798 return '%s, %s' % (self.expand(first),self.expand(second))
4799
4800 - def BELONGS(self,first,second=None):
4801 if not isinstance(second,(list, tuple)): 4802 raise SyntaxError("Not supported") 4803 if first.type != 'id': 4804 return [GAEF(first.name,'in',self.represent(second,first.type),lambda a,b:a in b)] 4805 else: 4806 second = [Key.from_path(first._tablename, int(i)) for i in second] 4807 return [GAEF(first.name,'in',second,lambda a,b:a in b)]
4808
4809 - def CONTAINS(self,first,second,case_sensitive=False):
4810 # silently ignoring: GAE can only do case sensitive matches! 4811 if not first.type.startswith('list:'): 4812 raise SyntaxError("Not supported") 4813 return [GAEF(first.name,'=',self.expand(second,first.type[5:]),lambda a,b:b in a)]
4814
4815 - def NOT(self,first):
4816 nops = { self.EQ: self.NE, 4817 self.NE: self.EQ, 4818 self.LT: self.GE, 4819 self.GT: self.LE, 4820 self.LE: self.GT, 4821 self.GE: self.LT} 4822 if not isinstance(first,Query): 4823 raise SyntaxError("Not supported") 4824 nop = nops.get(first.op,None) 4825 if not nop: 4826 raise SyntaxError("Not supported %s" % first.op.__name__) 4827 first.op = nop 4828 return self.expand(first)
4829
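# Negation sketch: the datastore has no generic NOT, so a negated query is
# rewritten with the mirrored operator from the nops table above; e.g.
# (assuming a defined GAE table `thing`) ~(db.thing.name == 'x') is
# executed as db.thing.name != 'x'.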
4830 - def truncate(self,table,mode):
4831 self.db(self.db._adapter.id_query(table)).delete()
4832
4833 - def select_raw(self,query,fields=None,attributes=None):
4834 db = self.db 4835 fields = fields or [] 4836 attributes = attributes or {} 4837 args_get = attributes.get 4838 new_fields = [] 4839 for item in fields: 4840 if isinstance(item,SQLALL): 4841 new_fields += item._table 4842 else: 4843 new_fields.append(item) 4844 fields = new_fields 4845 if query: 4846 tablename = self.get_table(query) 4847 elif fields: 4848 tablename = fields[0].tablename 4849 query = db._adapter.id_query(fields[0].table) 4850 else: 4851 raise SyntaxError("Unable to determine a tablename") 4852 4853 if query: 4854 if use_common_filters(query): 4855 query = self.common_filter(query,[tablename]) 4856 4857 #tableobj is a GAE Model class (or subclass) 4858 tableobj = db[tablename]._tableobj 4859 filters = self.expand(query) 4860 4861 projection = None 4862 if len(db[tablename].fields) == len(fields): 4863 #getting all fields, not a projection query 4864 projection = None 4865 elif args_get('projection') == True: 4866 projection = [] 4867 for f in fields: 4868 if f.type in ['text', 'blob', 'json']: 4869 raise SyntaxError( 4870 "text and blob field types not allowed in projection queries") 4871 else: 4872 projection.append(f.name) 4873 elif args_get('filterfields') == True: 4874 projection = [] 4875 for f in fields: 4876 projection.append(f.name) 4877 4878 # real projections can't include 'id'. 4879 # it will be added to the result later 4880 query_projection = [ 4881 p for p in projection if \ 4882 p != db[tablename]._id.name] if projection and \ 4883 args_get('projection') == True\ 4884 else None 4885 4886 cursor = None 4887 if isinstance(args_get('reusecursor'), str): 4888 cursor = args_get('reusecursor') 4889 items = gae.Query(tableobj, projection=query_projection, 4890 cursor=cursor) 4891 4892 for filter in filters: 4893 if args_get('projection') == True and \ 4894 filter.name in query_projection and \ 4895 filter.op in ['=', '<=', '>=']: 4896 raise SyntaxError( 4897 "projection fields cannot have equality filters") 4898 if filter.name=='__key__' and filter.op=='>' and filter.value==0: 4899 continue 4900 elif filter.name=='__key__' and filter.op=='=': 4901 if filter.value==0: 4902 items = [] 4903 elif isinstance(filter.value, Key): 4904 # key queries return a class instance, 4905 # can't use projection 4906 # extra values will be ignored in post-processing later 4907 item = tableobj.get(filter.value) 4908 items = (item and [item]) or [] 4909 else: 4910 # key queries return a class instance, 4911 # can't use projection 4912 # extra values will be ignored in post-processing later 4913 item = tableobj.get_by_id(filter.value) 4914 items = (item and [item]) or [] 4915 elif isinstance(items,list): # i.e. there is a single record! 4916 items = [i for i in items if filter.apply( 4917 getattr(i,filter.name),filter.value)] 4918 else: 4919 if filter.name=='__key__' and filter.op != 'in': 4920 items.order('__key__') 4921 items = items.filter('%s %s' % (filter.name,filter.op), 4922 filter.value) 4923 if not isinstance(items,list): 4924 if args_get('left', None): 4925 raise SyntaxError('Set: no left join in appengine') 4926 if args_get('groupby', None): 4927 raise SyntaxError('Set: no groupby in appengine') 4928 orderby = args_get('orderby', False) 4929 if orderby: 4930 ### THIS REALLY NEEDS IMPROVEMENT !!!
4931 if isinstance(orderby, (list, tuple)): 4932 orderby = xorify(orderby) 4933 if isinstance(orderby,Expression): 4934 orderby = self.expand(orderby) 4935 orders = orderby.split(', ') 4936 for order in orders: 4937 order={'-id':'-__key__','id':'__key__'}.get(order,order) 4938 items = items.order(order) 4939 if args_get('limitby', None): 4940 (lmin, lmax) = attributes['limitby'] 4941 (limit, offset) = (lmax - lmin, lmin) 4942 rows = items.fetch(limit,offset=offset) 4943 #cursor is only useful if there was a limit and we didn't return 4944 # all results 4945 if args_get('reusecursor'): 4946 db['_lastcursor'] = items.cursor() 4947 items = rows 4948 return (items, tablename, projection or db[tablename].fields)
4949
4950 - def select(self,query,fields,attributes):
4951 """ 4952 This is the GAE version of select. some notes to consider: 4953 - db['_lastsql'] is not set because there is not SQL statement string 4954 for a GAE query 4955 - 'nativeRef' is a magical fieldname used for self references on GAE 4956 - optional attribute 'projection' when set to True will trigger 4957 use of the GAE projection queries. note that there are rules for 4958 what is accepted imposed by GAE: each field must be indexed, 4959 projection queries cannot contain blob or text fields, and you 4960 cannot use == and also select that same field. see https://developers.google.com/appengine/docs/python/datastore/queries#Query_Projection 4961 - optional attribute 'filterfields' when set to True web2py will only 4962 parse the explicitly listed fields into the Rows object, even though 4963 all fields are returned in the query. This can be used to reduce 4964 memory usage in cases where true projection queries are not 4965 usable. 4966 - optional attribute 'reusecursor' allows use of cursor with queries 4967 that have the limitby attribute. Set the attribute to True for the 4968 first query, set it to the value of db['_lastcursor'] to continue 4969 a previous query. The user must save the cursor value between 4970 requests, and the filters must be identical. It is up to the user 4971 to follow google's limitations: https://developers.google.com/appengine/docs/python/datastore/queries#Query_Cursors 4972 """ 4973 4974 (items, tablename, fields) = self.select_raw(query,fields,attributes) 4975 # self.db['_lastsql'] = self._select(query,fields,attributes) 4976 rows = [[(t==self.db[tablename]._id.name and item) or \ 4977 (t=='nativeRef' and item) or getattr(item, t) \ 4978 for t in fields] for item in items] 4979 colnames = ['%s.%s' % (tablename, t) for t in fields] 4980 processor = attributes.get('processor',self.parse) 4981 return processor(rows,fields,colnames,False)
4982
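# Usage sketch for the attributes documented above (illustrative only; a
# GAE-backed `db` and a table `thing` with indexed fields are assumed):
#
# >>> rows = db(db.thing.name == 'x').select(db.thing.id, db.thing.name,
# ...                                        projection=True)
# >>> page1 = db(db.thing).select(limitby=(0, 10), reusecursor=True)
# >>> cursor = db['_lastcursor']        # persist this between requests
# >>> page2 = db(db.thing).select(limitby=(0, 10), reusecursor=cursor)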
4983 - def count(self,query,distinct=None,limit=None):
4984 if distinct: 4985 raise RuntimeError("COUNT DISTINCT not supported") 4986 (items, tablename, fields) = self.select_raw(query) 4987 # self.db['_lastsql'] = self._count(query) 4988 try: 4989 return len(items) 4990 except TypeError: 4991 return items.count(limit=limit)
4992
4993 - def delete(self,tablename, query):
4994 """ 4995 This function was changed on 2010-05-04 because according to 4996 http://code.google.com/p/googleappengine/issues/detail?id=3119 4997 GAE no longer supports deleting more than 1000 records. 4998 """ 4999 # self.db['_lastsql'] = self._delete(tablename,query) 5000 (items, tablename, fields) = self.select_raw(query) 5001 # items can be one item or a query 5002 if not isinstance(items,list): 5003 #use a keys_only query to ensure that this runs as a datastore 5004 # small operations 5005 leftitems = items.fetch(1000, keys_only=True) 5006 counter = 0 5007 while len(leftitems): 5008 counter += len(leftitems) 5009 gae.delete(leftitems) 5010 leftitems = items.fetch(1000, keys_only=True) 5011 else: 5012 counter = len(items) 5013 gae.delete(items) 5014 return counter
5015
5016 - def update(self,tablename,query,update_fields):
5017 # self.db['_lastsql'] = self._update(tablename,query,update_fields) 5018 (items, tablename, fields) = self.select_raw(query) 5019 counter = 0 5020 for item in items: 5021 for field, value in update_fields: 5022 setattr(item, field.name, self.represent(value,field.type)) 5023 item.put() 5024 counter += 1 5025 LOGGER.info(str(counter)) 5026 return counter
5027
5028 - def insert(self,table,fields):
5029 dfields=dict((f.name,self.represent(v,f.type)) for f,v in fields) 5030 # table._db['_lastsql'] = self._insert(table,fields) 5031 tmp = table._tableobj(**dfields) 5032 tmp.put() 5033 rid = Reference(tmp.key().id()) 5034 (rid._table, rid._record, rid._gaekey) = (table, None, tmp.key()) 5035 return rid
5036
5037 - def bulk_insert(self,table,items):
5038 parsed_items = [] 5039 for item in items: 5040 dfields=dict((f.name,self.represent(v,f.type)) for f,v in item) 5041 parsed_items.append(table._tableobj(**dfields)) 5042 gae.put(parsed_items) 5043 return True
5044
5045 -def uuid2int(uuidv):
5046 return uuid.UUID(uuidv).int
5047
5048 -def int2uuid(n):
5049 return str(uuid.UUID(int=n))
5050
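# Round-trip sketch for the two helpers above (example value only):
#
# >>> n = uuid2int('12345678-1234-1234-1234-123456789012')
# >>> int2uuid(n)
# '12345678-1234-1234-1234-123456789012'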
5051 -class CouchDBAdapter(NoSQLAdapter):
5052 drivers = ('couchdb',) 5053 5054 uploads_in_blob = True 5055 types = { 5056 'boolean': bool, 5057 'string': str, 5058 'text': str, 5059 'json': str, 5060 'password': str, 5061 'blob': str, 5062 'upload': str, 5063 'integer': long, 5064 'bigint': long, 5065 'float': float, 5066 'double': float, 5067 'date': datetime.date, 5068 'time': datetime.time, 5069 'datetime': datetime.datetime, 5070 'id': long, 5071 'reference': long, 5072 'list:string': list, 5073 'list:integer': list, 5074 'list:reference': list, 5075 } 5076
5077 - def file_exists(self, filename): pass
5078 - def file_open(self, filename, mode='rb', lock=True): pass
5079 - def file_close(self, fileobj): pass
5080
5081 - def expand(self,expression,field_type=None):
5082 if isinstance(expression,Field): 5083 if expression.type=='id': 5084 return "%s._id" % expression.tablename 5085 return BaseAdapter.expand(self,expression,field_type)
5086
5087 - def AND(self,first,second):
5088 return '(%s && %s)' % (self.expand(first),self.expand(second))
5089
5090 - def OR(self,first,second):
5091 return '(%s || %s)' % (self.expand(first),self.expand(second))
5092
5093 - def EQ(self,first,second):
5094 if second is None: 5095 return '(%s == null)' % self.expand(first) 5096 return '(%s == %s)' % (self.expand(first),self.expand(second,first.type))
5097
5098 - def NE(self,first,second):
5099 if second is None: 5100 return '(%s != null)' % self.expand(first) 5101 return '(%s != %s)' % (self.expand(first),self.expand(second,first.type))
5102
5103 - def COMMA(self,first,second):
5104 return '%s + %s' % (self.expand(first),self.expand(second))
5105
5106 - def represent(self, obj, fieldtype):
5107 value = NoSQLAdapter.represent(self, obj, fieldtype) 5108 if fieldtype=='id': 5109 return repr(str(long(value))) 5110 elif fieldtype in ('date','time','datetime','boolean'): 5111 return serializers.json(value) 5112 return repr(not isinstance(value,unicode) and value \ 5113 or value and value.encode('utf8'))
5114
5115 - def __init__(self,db,uri='couchdb://127.0.0.1:5984', 5116 pool_size=0,folder=None,db_codec ='UTF-8', 5117 credential_decoder=IDENTITY, driver_args={}, 5118 adapter_args={}, do_connect=True, after_connection=None):
5119 self.db = db 5120 self.uri = uri 5121 if do_connect: self.find_driver(adapter_args) 5122 self.dbengine = 'couchdb' 5123 self.folder = folder 5124 db['_lastsql'] = '' 5125 self.db_codec = 'UTF-8' 5126 self._after_connection = after_connection 5127 self.pool_size = pool_size 5128 5129 url='http://'+uri[10:] 5130 def connector(url=url,driver_args=driver_args): 5131 return self.driver.Server(url,**driver_args)
5132 self.reconnect(connector,cursor=False)
5133
5134 - def create_table(self, table, migrate=True, fake_migrate=False, polymodel=None):
5135 if migrate: 5136 try: 5137 self.connection.create(table._tablename) 5138 except: 5139 pass
5140
5141 - def insert(self,table,fields):
5142 id = uuid2int(web2py_uuid()) 5143 ctable = self.connection[table._tablename] 5144 values = dict((k.name,self.represent(v,k.type)) for k,v in fields) 5145 values['_id'] = str(id) 5146 ctable.save(values) 5147 return id
5148
5149 - def _select(self,query,fields,attributes):
5150 if not isinstance(query,Query): 5151 raise SyntaxError("Not Supported") 5152 for key in set(attributes.keys())-SELECT_ARGS: 5153 raise SyntaxError('invalid select attribute: %s' % key) 5154 new_fields=[] 5155 for item in fields: 5156 if isinstance(item,SQLALL): 5157 new_fields += item._table 5158 else: 5159 new_fields.append(item) 5160 def uid(fd): 5161 return fd=='id' and '_id' or fd
5162 def get(row,fd): 5163 return fd=='id' and long(row['_id']) or row.get(fd,None) 5164 fields = new_fields 5165 tablename = self.get_table(query) 5166 fieldnames = [f.name for f in (fields or self.db[tablename])] 5167 colnames = ['%s.%s' % (tablename,k) for k in fieldnames] 5168 fields = ','.join(['%s.%s' % (tablename,uid(f)) for f in fieldnames]) 5169 fn="(function(%(t)s){if(%(query)s)emit(%(order)s,[%(fields)s]);})" %\ 5170 dict(t=tablename, 5171 query=self.expand(query), 5172 order='%s._id' % tablename, 5173 fields=fields) 5174 return fn, colnames 5175
5176 - def select(self,query,fields,attributes):
5177 if not isinstance(query,Query): 5178 raise SyntaxError("Not Supported") 5179 fn, colnames = self._select(query,fields,attributes) 5180 tablename = colnames[0].split('.')[0] 5181 ctable = self.connection[tablename] 5182 rows = [cols['value'] for cols in ctable.query(fn)] 5183 processor = attributes.get('processor',self.parse) 5184 return processor(rows,fields,colnames,False)
5185
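# View sketch: _select above compiles a DAL query into a CouchDB temporary
# map function of roughly this shape (a table `person` with a string field
# `name` is assumed):
#
#   (function(person){if((person.name == 'James'))emit(person._id,[person.name]);})
#
# select() then runs it through ctable.query(fn) and parses the emitted rows.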
5186 - def delete(self,tablename,query):
5187 if not isinstance(query,Query): 5188 raise SyntaxError("Not Supported") 5189 if query.first.type=='id' and query.op==self.EQ: 5190 id = query.second 5191 tablename = query.first.tablename 5192 assert(tablename == query.first.tablename) 5193 ctable = self.connection[tablename] 5194 try: 5195 del ctable[str(id)] 5196 return 1 5197 except couchdb.http.ResourceNotFound: 5198 return 0 5199 else: 5200 tablename = self.get_table(query) 5201 rows = self.select(query,[self.db[tablename]._id],{}) 5202 ctable = self.connection[tablename] 5203 for row in rows: 5204 del ctable[str(row.id)] 5205 return len(rows)
5206
5207 - def update(self,tablename,query,fields):
5208 if not isinstance(query,Query): 5209 raise SyntaxError("Not Supported") 5210 if query.first.type=='id' and query.op==self.EQ: 5211 id = query.second 5212 tablename = query.first.tablename 5213 ctable = self.connection[tablename] 5214 try: 5215 doc = ctable[str(id)] 5216 for key,value in fields: 5217 doc[key.name] = self.represent(value,self.db[tablename][key.name].type) 5218 ctable.save(doc) 5219 return 1 5220 except couchdb.http.ResourceNotFound: 5221 return 0 5222 else: 5223 tablename = self.get_table(query) 5224 rows = self.select(query,[self.db[tablename]._id],{}) 5225 ctable = self.connection[tablename] 5226 table = self.db[tablename] 5227 for row in rows: 5228 doc = ctable[str(row.id)] 5229 for key,value in fields: 5230 doc[key.name] = self.represent(value,table[key.name].type) 5231 ctable.save(doc) 5232 return len(rows)
5233
5234 - def count(self,query,distinct=None):
5235 if distinct: 5236 raise RuntimeError("COUNT DISTINCT not supported") 5237 if not isinstance(query,Query): 5238 raise SyntaxError("Not Supported") 5239 tablename = self.get_table(query) 5240 rows = self.select(query,[self.db[tablename]._id],{}) 5241 return len(rows)
5242
5243 -def cleanup(text):
5244 """ 5245 validates that the given text is clean: only contains [0-9a-zA-Z_] 5246 """ 5247 if not REGEX_ALPHANUMERIC.match(text): 5248 raise SyntaxError('invalid table or field name: %s' % text) 5249 return text
5250
5251 -class MongoDBAdapter(NoSQLAdapter):
5252 native_json = True 5253 drivers = ('pymongo',) 5254 5255 uploads_in_blob = True 5256 5257 types = { 5258 'boolean': bool, 5259 'string': str, 5260 'text': str, 5261 'json': str, 5262 'password': str, 5263 'blob': str, 5264 'upload': str, 5265 'integer': long, 5266 'bigint': long, 5267 'float': float, 5268 'double': float, 5269 'date': datetime.date, 5270 'time': datetime.time, 5271 'datetime': datetime.datetime, 5272 'id': long, 5273 'reference': long, 5274 'list:string': list, 5275 'list:integer': list, 5276 'list:reference': list, 5277 } 5278 5279 error_messages = {"javascript_needed": "This must yet be replaced" + 5280 " with javascript in order to work."} 5281
- def __init__(self,db,uri='mongodb://127.0.0.1:27017/db', 5283 pool_size=0, folder=None, db_codec ='UTF-8', 5284 credential_decoder=IDENTITY, driver_args={}, 5285 adapter_args={}, do_connect=True, after_connection=None):
5286 5287 self.db = db 5288 self.uri = uri 5289 if do_connect: self.find_driver(adapter_args) 5290 import random 5291 from bson.objectid import ObjectId 5292 from bson.son import SON 5293 import pymongo.uri_parser 5294 5295 m = pymongo.uri_parser.parse_uri(uri) 5296 5297 self.SON = SON 5298 self.ObjectId = ObjectId 5299 self.random = random 5300 5301 self.dbengine = 'mongodb' 5302 self.folder = folder 5303 db['_lastsql'] = '' 5304 self.db_codec = 'UTF-8' 5305 self._after_connection = after_connection 5306 self.pool_size = pool_size 5307 #this is the minimum number of replicas it should wait 5308 # for on insert/update 5309 self.minimumreplication = adapter_args.get('minimumreplication',0) 5310 # inserts and selects used to be performed asynchronously by default, 5311 # but now the default is 5312 # synchronous, except when overruled by either this default or a 5313 # function parameter 5314 self.safe = adapter_args.get('safe',True) 5315 5316 if isinstance(m,tuple): 5317 m = {"database" : m[1]} 5318 if m.get('database')==None: 5319 raise SyntaxError("Database is required!") 5320 5321 def connector(uri=self.uri,m=m): 5322 # Connection() is deprecated 5323 if hasattr(self.driver, "MongoClient"): 5324 Connection = self.driver.MongoClient 5325 else: 5326 Connection = self.driver.Connection 5327 return Connection(uri)[m.get('database')]
5328 5329 self.reconnect(connector,cursor=False)
5330
5331 - def object_id(self, arg=None):
5332 """ Convert input to a valid Mongodb ObjectId instance 5333 5334 self.object_id("<random>") -> ObjectId (not unique) instance """ 5335 if not arg: 5336 arg = 0 5337 if isinstance(arg, basestring): 5338 # we assume an integer as default input 5339 rawhex = len(arg.replace("0x", "").replace("L", "")) == 24 5340 if arg.isdigit() and (not rawhex): 5341 arg = int(arg) 5342 elif arg == "<random>": 5343 arg = int("0x%sL" % \ 5344 "".join([self.random.choice("0123456789abcdef") \ 5345 for x in range(24)]), 0) 5346 elif arg.isalnum(): 5347 if not arg.startswith("0x"): 5348 arg = "0x%s" % arg 5349 try: 5350 arg = int(arg, 0) 5351 except ValueError, e: 5352 raise ValueError( 5353 "invalid objectid argument string: %s" % e) 5354 else: 5355 raise ValueError("Invalid objectid argument string. " + 5356 "Requires an integer or base 16 value") 5357 elif isinstance(arg, self.ObjectId): 5358 return arg 5359 5360 if not isinstance(arg, (int, long)): 5361 raise TypeError("object_id argument must be of type " + 5362 "ObjectId or an objectid representable integer") 5363 if arg == 0: 5364 hexvalue = "".zfill(24) 5365 else: 5366 hexvalue = hex(arg)[2:].replace("L", "") 5367 return self.ObjectId(hexvalue)
5368
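# Conversion sketch (illustrative; `adapter` stands for a connected
# MongoDBAdapter and the output relies on the 24-character zero padding
# above):
#
# >>> adapter.object_id(1)
# ObjectId('000000000000000000000001')
# >>> adapter.object_id('000000000000000000000001')    # 24-digit raw hex
# ObjectId('000000000000000000000001')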
5369 - def parse_reference(self, value, field_type):
5370 # here we have to check for ObjectID before base parse 5371 if isinstance(value, self.ObjectId): 5372 value = long(str(value), 16) 5373 return super(MongoDBAdapter, 5374 self).parse_reference(value, field_type)
5375
5376 - def parse_id(self, value, field_type):
5377 if isinstance(value, self.ObjectId): 5378 value = long(str(value), 16) 5379 return super(MongoDBAdapter, 5380 self).parse_id(value, field_type)
5381
5382 - def represent(self, obj, fieldtype):
# the base adapter does not support MongoDB ObjectId 5384 if isinstance(obj, self.ObjectId): 5385 value = obj 5386 else: 5387 value = NoSQLAdapter.represent(self, obj, fieldtype) 5388 # reference types must be converted to ObjectId 5389 if fieldtype =='date': 5390 if value == None: 5391 return value 5392 # this piece of data can be stripped off based on the fieldtype 5393 t = datetime.time(0, 0, 0) 5394 # mongodb doesn't have a date object, so it must be stored as datetime, 5395 # string or integer 5396 return datetime.datetime.combine(value, t) 5397 elif fieldtype == 'time': 5398 if value == None: 5399 return value 5400 # this piece of data can be stripped off based on the fieldtype 5401 d = datetime.date(2000, 1, 1) 5402 # mongodb doesn't have a time object, so it must be stored as datetime, 5403 # string or integer 5404 return datetime.datetime.combine(d, value) 5405 elif fieldtype == "blob": 5406 from bson import Binary 5407 if not isinstance(value, Binary): 5408 return Binary(value) 5409 return value 5410 elif (isinstance(fieldtype, basestring) and 5411 fieldtype.startswith('list:')): 5412 if fieldtype.startswith('list:reference'): 5413 newval = [] 5414 for v in value: 5415 newval.append(self.object_id(v)) 5416 return newval 5417 return value 5418 elif ((isinstance(fieldtype, basestring) and 5419 fieldtype.startswith("reference")) or 5420 (isinstance(fieldtype, Table)) or fieldtype=="id"): 5421 value = self.object_id(value) 5422 return value
5423
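# Behaviour sketch: MongoDB has neither a date-only nor a time-only type,
# so both are widened to datetime (an `adapter` instance is assumed):
#
# >>> adapter.represent(datetime.date(2012, 1, 31), 'date')
# datetime.datetime(2012, 1, 31, 0, 0)
# >>> adapter.represent(datetime.time(12, 30), 'time')
# datetime.datetime(2000, 1, 1, 12, 30)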
5424 - def create_table(self, table, migrate=True, fake_migrate=False, 5425 polymodel=None, isCapped=False):
5426 if isCapped: 5427 raise RuntimeError("Not implemented")
5428
5429 - def count(self, query, distinct=None, snapshot=True):
5430 if distinct: 5431 raise RuntimeError("COUNT DISTINCT not supported") 5432 if not isinstance(query,Query): 5433 raise SyntaxError("Not Supported") 5434 tablename = self.get_table(query) 5435 return long(self.select(query,[self.db[tablename]._id], {}, 5436 count=True,snapshot=snapshot)['count'])
# Maybe it would be faster if we just implemented the pymongo 5438 # .count() function, which is probably quicker; 5439 # therefore call __select() connection[table].find(query).count() 5440 # since this will probably reduce the return set. 5441
5442 - def expand(self, expression, field_type=None):
5443 if isinstance(expression, Query): 5444 # any query using 'id': 5445 # set name as _id (as per pymongo/mongodb primary key) 5446 # convert second arg to an objectid field 5447 # (if it's not already) 5448 # if second arg is 0 convert to objectid 5449 if isinstance(expression.first,Field) and \ 5450 ((expression.first.type == 'id') or \ 5451 ("reference" in expression.first.type)): 5452 if expression.first.type == 'id': 5453 expression.first.name = '_id' 5454 # cast to Mongo ObjectId 5455 if isinstance(expression.second, (tuple, list, set)): 5456 expression.second = [self.object_id(item) for 5457 item in expression.second] 5458 else: 5459 expression.second = self.object_id(expression.second) 5460 result = expression.op(expression.first, expression.second) 5461 5462 if isinstance(expression, Field): 5463 if expression.type=='id': 5464 result = "_id" 5465 else: 5466 result = expression.name 5467 elif isinstance(expression, (Expression, Query)): 5468 if not expression.second is None: 5469 result = expression.op(expression.first, expression.second) 5470 elif not expression.first is None: 5471 result = expression.op(expression.first) 5472 elif not isinstance(expression.op, str): 5473 result = expression.op() 5474 else: 5475 result = expression.op 5476 elif field_type: 5477 result = self.represent(expression,field_type) 5478 elif isinstance(expression,(list,tuple)): 5479 result = ','.join(self.represent(item,field_type) for 5480 item in expression) 5481 else: 5482 result = expression 5483 return result
5484
5485 - def drop(self, table, mode=''):
5486 ctable = self.connection[table._tablename] 5487 ctable.drop()
5488
5489 - def truncate(self, table, mode, safe=None):
5490 if safe == None: 5491 safe=self.safe 5492 ctable = self.connection[table._tablename] 5493 ctable.remove(None, safe=safe)
5494
5495 - def _select(self, query, fields, attributes):
5496 if 'for_update' in attributes: 5497 logging.warn('mongodb does not support for_update') 5498 for key in set(attributes.keys())-set(('limitby', 5499 'orderby','for_update')): 5500 if attributes[key]!=None: 5501 logging.warn('select attribute not implemented: %s' % key) 5502 5503 new_fields=[] 5504 mongosort_list = [] 5505 5506 # try an orderby attribute 5507 orderby = attributes.get('orderby', False) 5508 limitby = attributes.get('limitby', False) 5509 # distinct = attributes.get('distinct', False) 5510 if orderby: 5511 if isinstance(orderby, (list, tuple)): 5512 orderby = xorify(orderby) 5513 5514 # !!!! need to add 'random' 5515 for f in self.expand(orderby).split(','): 5516 if f.startswith('-'): 5517 mongosort_list.append((f[1:], -1)) 5518 else: 5519 mongosort_list.append((f, 1)) 5520 if limitby: 5521 limitby_skip, limitby_limit = limitby[0], int(limitby[1]) 5522 else: 5523 limitby_skip = limitby_limit = 0 5524 5525 mongofields_dict = self.SON() 5526 mongoqry_dict = {} 5527 for item in fields: 5528 if isinstance(item, SQLALL): 5529 new_fields += item._table 5530 else: 5531 new_fields.append(item) 5532 fields = new_fields 5533 if isinstance(query,Query): 5534 tablename = self.get_table(query) 5535 elif len(fields) != 0: 5536 tablename = fields[0].tablename 5537 else: 5538 raise SyntaxError("The table name could not be found in " + 5539 "the query nor from the select statement.") 5540 mongoqry_dict = self.expand(query) 5541 fields = fields or self.db[tablename] 5542 for field in fields: 5543 mongofields_dict[field.name] = 1 5544 5545 return tablename, mongoqry_dict, mongofields_dict, mongosort_list, \ 5546 limitby_limit, limitby_skip
5547
5548 - def select(self, query, fields, attributes, count=False, 5549 snapshot=False):
5550 # TODO: support joins 5551 tablename, mongoqry_dict, mongofields_dict, mongosort_list, \ 5552 limitby_limit, limitby_skip = self._select(query, fields, attributes) 5553 ctable = self.connection[tablename] 5554 5555 if count: 5556 return {'count' : ctable.find( 5557 mongoqry_dict, mongofields_dict, 5558 skip=limitby_skip, limit=limitby_limit, 5559 sort=mongosort_list, snapshot=snapshot).count()} 5560 else: 5561 # pymongo cursor object 5562 mongo_list_dicts = ctable.find(mongoqry_dict, 5563 mongofields_dict, skip=limitby_skip, 5564 limit=limitby_limit, sort=mongosort_list, 5565 snapshot=snapshot) 5566 rows = [] 5567 # populate row in proper order 5568 # Here we replace ._id with .id to follow the standard naming 5569 colnames = [] 5570 newnames = [] 5571 for field in fields: 5572 colname = str(field) 5573 colnames.append(colname) 5574 tablename, fieldname = colname.split(".") 5575 if fieldname == "_id": 5576 # Mongodb reserved uuid key 5577 field.name = "id" 5578 newnames.append(".".join((tablename, field.name))) 5579 5580 for record in mongo_list_dicts: 5581 row=[] 5582 for colname in colnames: 5583 tablename, fieldname = colname.split(".") 5584 # switch to Mongo _id uuids for retrieving 5585 # record id's 5586 if fieldname == "id": fieldname = "_id" 5587 if fieldname in record: 5588 value = record[fieldname] 5589 else: 5590 value = None 5591 row.append(value) 5592 rows.append(row) 5593 5594 processor = attributes.get('processor', self.parse) 5595 result = processor(rows, fields, newnames, False) 5596 return result
5597
5598 - def _insert(self, table, fields):
5599 values = dict() 5600 for k, v in fields: 5601 if not k.name in ["id", "safe"]: 5602 fieldname = k.name 5603 fieldtype = table[k.name].type 5604 values[fieldname] = self.represent(v, fieldtype) 5605 return values
5606 5607 # Safe determines whether an asynchronous request is done or a 5608 # synchronous action is done. 5609 # For safety, we use synchronous requests by default
5610 - def insert(self, table, fields, safe=None):
5611 if safe==None: 5612 safe = self.safe 5613 ctable = self.connection[table._tablename] 5614 values = self._insert(table, fields) 5615 ctable.insert(values, safe=safe) 5616 return long(str(values['_id']), 16)
5617 5618 #this function returns a dict with the where clause and update fields
5619 - def _update(self, tablename, query, fields):
5620 if not isinstance(query, Query): 5621 raise SyntaxError("Not Supported") 5622 filter = None 5623 if query: 5624 filter = self.expand(query) 5625 # do not try to update id fields to avoid backend errors 5626 modify = {'$set': dict((k.name, self.represent(v, k.type)) for 5627 k, v in fields if (not k.name in ("_id", "id")))} 5628 return modify, filter
5629
5630 - def update(self, tablename, query, fields, safe=None):
5631 if safe == None: 5632 safe = self.safe 5633 # return the number of adjusted rows or zero, but raise no exceptions 5634 # related to not finding the result 5635 if not isinstance(query, Query): 5636 raise RuntimeError("Not implemented") 5637 amount = self.count(query, False) 5638 modify, filter = self._update(tablename, query, fields) 5639 try: 5640 result = self.connection[tablename].update(filter, 5641 modify, multi=True, safe=safe) 5642 if safe: 5643 try: 5644 # if result count is available fetch it 5645 return result["n"] 5646 except (KeyError, AttributeError, TypeError): 5647 return amount 5648 else: 5649 return amount 5650 except Exception, e: 5651 # TODO reverse the update query to verify that it succeeded 5652 raise RuntimeError("uncaught exception when updating rows: %s" % e)
5653
5654 - def _delete(self, tablename, query):
5655 if not isinstance(query, Query): 5656 raise RuntimeError("query type %s is not supported" % \ 5657 type(query)) 5658 return self.expand(query)
5659
5660 - def delete(self, tablename, query, safe=None):
5661 if safe is None: 5662 safe = self.safe 5663 amount = 0 5664 amount = self.count(query, False) 5665 filter = self._delete(tablename, query) 5666 self.connection[tablename].remove(filter, safe=safe) 5667 return amount
5668
5669 - def bulk_insert(self, table, items):
5670 return [self.insert(table,item) for item in items]
5671 5672 ## OPERATORS
5673 - def INVERT(self, first):
5674 #print "in invert first=%s" % first 5675 return '-%s' % self.expand(first)
5676 5677 # TODO This will probably not work:(
5678 - def NOT(self, first):
5679 result = {} 5680 result["$not"] = self.expand(first) 5681 return result
5682
5683 - def AND(self,first,second):
5684 f = self.expand(first) 5685 s = self.expand(second) 5686 f.update(s) 5687 return f
5688
5689 - def OR(self,first,second):
5690 # pymongo expects: .find({'$or': [{'name':'1'}, {'name':'2'}]}) 5691 result = {} 5692 f = self.expand(first) 5693 s = self.expand(second) 5694 result['$or'] = [f,s] 5695 return result
5696
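# Filter-document sketch (illustrative; a table `t` with integer fields
# `a` and `b` is assumed, output shown roughly):
#
# >>> adapter.expand((db.t.a == 1) & (db.t.b == 2))
# {'a': 1, 'b': 2}
# >>> adapter.expand((db.t.a == 1) | (db.t.b == 2))
# {'$or': [{'a': 1}, {'b': 2}]}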
5697 - def BELONGS(self, first, second):
5698 if isinstance(second, str): 5699 return {self.expand(first) : {"$in" : [ second[:-1]]} } 5700 elif second==[] or second==() or second==set(): 5701 return {1:0} 5702 items = [self.expand(item, first.type) for item in second] 5703 return {self.expand(first) : {"$in" : items} }
5704
5705 - def EQ(self,first,second=None):
5706 result = {} 5707 result[self.expand(first)] = self.expand(second) 5708 return result
5709
5710 - def NE(self, first, second=None):
5711 result = {} 5712 result[self.expand(first)] = {'$ne': self.expand(second)} 5713 return result
5714
5715 - def LT(self,first,second=None):
5716 if second is None: 5717 raise RuntimeError("Cannot compare %s < None" % first) 5718 result = {} 5719 result[self.expand(first)] = {'$lt': self.expand(second)} 5720 return result
5721
5722 - def LE(self,first,second=None):
5723 if second is None: 5724 raise RuntimeError("Cannot compare %s <= None" % first) 5725 result = {} 5726 result[self.expand(first)] = {'$lte': self.expand(second)} 5727 return result
5728
5729 - def GT(self,first,second):
5730 result = {} 5731 result[self.expand(first)] = {'$gt': self.expand(second)} 5732 return result
5733
5734 - def GE(self,first,second=None):
5735 if second is None: 5736 raise RuntimeError("Cannot compare %s >= None" % first) 5737 result = {} 5738 result[self.expand(first)] = {'$gte': self.expand(second)} 5739 return result
5740
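# The comparison and membership operators above translate to the matching
# MongoDB query operators, roughly (same assumed names as the sketch above):
#
# >>> adapter.GE(db.t.a, 5)
# {'a': {'$gte': 5}}
# >>> adapter.BELONGS(db.t.a, [1, 2, 3])
# {'a': {'$in': [1, 2, 3]}}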
5741 - def ADD(self, first, second):
5742 raise NotImplementedError(self.error_messages["javascript_needed"]) 5743 return '%s + %s' % (self.expand(first), 5744 self.expand(second, first.type))
5745
5746 - def SUB(self, first, second):
5747 raise NotImplementedError(self.error_messages["javascript_needed"]) 5748 return '(%s - %s)' % (self.expand(first), 5749 self.expand(second, first.type))
5750
5751 - def MUL(self, first, second):
5752 raise NotImplementedError(self.error_messages["javascript_needed"]) 5753 return '(%s * %s)' % (self.expand(first), 5754 self.expand(second, first.type))
5755
5756 - def DIV(self, first, second):
5757 raise NotImplementedError(self.error_messages["javascript_needed"]) 5758 return '(%s / %s)' % (self.expand(first), 5759 self.expand(second, first.type))
5760
5761 - def MOD(self, first, second):
5762 raise NotImplementedError(self.error_messages["javascript_needed"]) 5763 return '(%s %% %s)' % (self.expand(first), 5764 self.expand(second, first.type))
5765
5766 - def AS(self, first, second):
5767 raise NotImplementedError(self.error_messages["javascript_needed"]) 5768 return '%s AS %s' % (self.expand(first), second)
5769 5770 # We could implement an option that simulates a full featured SQL 5771 # database. But I think the option should be set explicitly or 5772 # implemented as another library.
5773 - def ON(self, first, second):
5774 raise NotImplementedError("This is not possible in NoSQL" + 5775 " but can be simulated with a wrapper.") 5776 return '%s ON %s' % (self.expand(first), self.expand(second))
5777 5778 # BELOW ARE TWO IMPLEMENTATIONS OF THE SAME FUNCTIONS. 5779 # WHICH ONE IS BEST? 5780
5781 - def COMMA(self, first, second):
5782 return '%s, %s' % (self.expand(first), self.expand(second))
5783
5784 - def LIKE(self, first, second):
5785 #escaping regex operators? 5786 return {self.expand(first): ('%s' % \ 5787 self.expand(second, 'string').replace('%','/'))}
5788
5789 - def STARTSWITH(self, first, second):
5790 #escaping regex operators? 5791 return {self.expand(first): ('/^%s/' % \ 5792 self.expand(second, 'string'))}
5793
5794 - def ENDSWITH(self, first, second):
#escaping regex operators? 5796 return {self.expand(first): ('/%s$/' % \ 5797 self.expand(second, 'string'))}
5798
5799 - def CONTAINS(self, first, second, case_sensitive=False):
# the case_sensitive flag is silently ignored: matching is case sensitive only 5801 # There is a technical difference, but mongodb doesn't support 5802 # it, and the result will be the same 5803 val = second if isinstance(second,self.ObjectId) else \ 5804 {'$regex':".*" + re.escape(self.expand(second, 'string')) + ".*"} 5805 return {self.expand(first) : val}
5806
5807 - def LIKE(self, first, second):
5808 import re 5809 return {self.expand(first): {'$regex': \ 5810 re.escape(self.expand(second, 5811 'string')).replace('%','.*')}}
5812 5813 #TODO verify full compatibilty with official SQL Like operator
5814 - def STARTSWITH(self, first, second):
5815 #TODO Solve almost the same problem as with endswith 5816 import re 5817 return {self.expand(first): {'$regex' : '^' + 5818 re.escape(self.expand(second, 5819 'string'))}}
5820 5821 #TODO verify full compatibilty with official SQL Like operator
5822 - def ENDSWITH(self, first, second):
#escaping regex operators? 5824 #TODO a name like zsa_corbitt is also returned when searched 5825 # with endswith('a'), 5826 # although it ends with a 't' 5827 import re 5828 return {self.expand(first): {'$regex': \ 5829 re.escape(self.expand(second, 'string')) + '$'}}
5830 5831 #TODO verify full compatibilty with official oracle contains operator
5832 - def CONTAINS(self, first, second, case_sensitive=False):
# the case_sensitive flag is silently ignored: matching is case sensitive only 5834 #There is a technical difference, but mongodb doesn't support 5835 # it, and the result will be the same 5836 #TODO contains operators need to be transformed to Regex 5837 return {self.expand(first) : {'$regex': \ 5838 ".*" + re.escape(self.expand(second, 'string')) + ".*"}}
5839
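# Regex-translation sketch for the operators above (the later of the two
# duplicate definitions wins at class-creation time; a string field `name`
# is assumed, output shown roughly):
#
# >>> adapter.STARTSWITH(db.t.name, 'Jo')
# {'name': {'$regex': '^Jo'}}
# >>> adapter.ENDSWITH(db.t.name, 'hn')
# {'name': {'$regex': 'hn$'}}
# >>> adapter.CONTAINS(db.t.name, 'oh')
# {'name': {'$regex': '.*oh.*'}}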
5840 5841 -class IMAPAdapter(NoSQLAdapter):
5842 drivers = ('imaplib',) 5843 5844 """ IMAP server adapter 5845 5846 This class is intended as an interface with 5847 email IMAP servers to perform simple queries in the 5848 web2py DAL query syntax, so email read, search and 5849 other related IMAP mail services (such as those implemented 5850 by brands like Google(r) and Yahoo!(r)) 5851 can be managed from web2py applications. 5852 5853 The code uses examples by Yuji Tomita on this post: 5854 http://yuji.wordpress.com/2011/06/22/python-imaplib-imap-example-with-gmail/#comment-1137 5855 and is based on the docs for Python imaplib, Python email 5856 and the IETF email RFCs (i.e. RFC 2060 and RFC 3501) 5857 5858 This adapter was tested with a small set of operations with Gmail(r). Requests 5859 to other services could raise command syntax and response data issues. 5860 5861 It creates its table and field names "statically", 5862 meaning that the developer should leave the table and field 5863 definitions to the DAL instance by calling the adapter's 5864 .define_tables() method. The tables are defined with the 5865 IMAP server mailbox list information. 5866 5867 .define_tables() returns a dictionary mapping dal tablenames 5868 to the server mailbox names with the following structure: 5869 5870 {<tablename>: str <server mailbox name>} 5871 5872 Here is a list of supported fields: 5873 5874 Field Type Description 5875 ################################################################ 5876 uid string 5877 answered boolean Flag 5878 created date 5879 content list:string A list of dict text or html parts 5880 to string 5881 cc string 5882 bcc string 5883 size integer the number of octets of the message* 5884 deleted boolean Flag 5885 draft boolean Flag 5886 flagged boolean Flag 5887 sender string 5888 recent boolean Flag 5889 seen boolean Flag 5890 subject string 5891 mime string The mime header declaration 5892 email string The complete RFC822 message** 5893 attachments <type list> Each non text part as dict 5894 encoding string The main detected encoding 5895 5896 *At the application side it is measured as the length of the RFC822 5897 message string 5898 5899 WARNING: As row id's are mapped to email sequence numbers, 5900 make sure your imap client web2py app does not delete messages 5901 during select or update actions, to prevent 5902 updating or deleting different messages. 5903 Sequence numbers change whenever the mailbox is updated. 5904 To avoid these sequence number issues, the use of uid 5905 fields in query references is recommended (although the update and delete 5906 in separate actions rule still applies).
5907 5908 # This is the code recommended to start imap support 5909 # at the app's model: 5910 5911 imapdb = DAL("imap://user:password@server:port", pool_size=1) # port 993 for ssl 5912 imapdb.define_tables() 5913 5914 Here is an (incomplete) list of possible imap commands: 5915 5916 # Count today's unseen messages 5917 # smaller than 6000 octets from the 5918 # inbox mailbox 5919 5920 q = imapdb.INBOX.seen == False 5921 q &= imapdb.INBOX.created == datetime.date.today() 5922 q &= imapdb.INBOX.size < 6000 5923 unread = imapdb(q).count() 5924 5925 # Fetch last query messages 5926 rows = imapdb(q).select() 5927 5928 # it is also possible to filter query select results with limitby and 5929 # sequences of mailbox fields 5930 5931 set.select(<fields sequence>, limitby=(<int>, <int>)) 5932 5933 # Mark last query messages as seen 5934 messages = [row.uid for row in rows] 5935 seen = imapdb(imapdb.INBOX.uid.belongs(messages)).update(seen=True) 5936 5937 # Delete messages in the imap database that have mails from mr. Gumby 5938 5939 deleted = 0 5940 for mailbox in imapdb.tables: 5941 deleted += imapdb(imapdb[mailbox].sender.contains("gumby")).delete() 5942 5943 # It is also possible to mark messages for deletion instead of erasing them 5944 # directly with set.update(deleted=True) 5945 5946 5947 # This object gives access 5948 # to the adapter's auto mailbox 5949 # mapped names (which native 5950 # mailbox has which table name) 5951 5952 imapdb.mailboxes <dict> # tablename, server native name pairs 5953 5954 # To retrieve a table native mailbox name use: 5955 imapdb.<table>.mailbox 5956 5957 ### New features v2.4.1: 5958 5959 # Declare mailboxes statically with tablename, name pairs 5960 # This avoids the extra server names retrieval 5961 5962 imapdb.define_tables({"inbox": "INBOX"}) 5963 5964 # Selects without content/attachments/email columns will only 5965 # fetch header and flags 5966 5967 imapdb(q).select(imapdb.INBOX.sender, imapdb.INBOX.subject) 5968 """ 5969 5970 types = { 5971 'string': str, 5972 'text': str, 5973 'date': datetime.date, 5974 'datetime': datetime.datetime, 5975 'id': long, 5976 'boolean': bool, 5977 'integer': int, 5978 'bigint': long, 5979 'blob': str, 5980 'list:string': str, 5981 } 5982 5983 dbengine = 'imap' 5984 5985 REGEX_URI = re.compile('^(?P<user>[^:]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:@]+)(\:(?P<port>[0-9]+))?$') 5986
5987 - def __init__(self, 5988 db, 5989 uri, 5990 pool_size=0, 5991 folder=None, 5992 db_codec ='UTF-8', 5993 credential_decoder=IDENTITY, 5994 driver_args={}, 5995 adapter_args={}, 5996 do_connect=True, 5997 after_connection=None):
5998 5999 # db uri: user@example.com:password@imap.server.com:123 6000 # TODO: max size adapter argument for preventing large mail transfers 6001 6002 self.db = db 6003 self.uri = uri 6004 if do_connect: self.find_driver(adapter_args) 6005 self.pool_size=pool_size 6006 self.folder = folder 6007 self.db_codec = db_codec 6008 self._after_connection = after_connection 6009 self.credential_decoder = credential_decoder 6010 self.driver_args = driver_args 6011 self.adapter_args = adapter_args 6012 self.mailbox_size = None 6013 self.static_names = None 6014 self.charset = sys.getfilesystemencoding() 6015 # imap class 6016 self.imap4 = None 6017 uri = uri.split("://")[1] 6018 6019 # MESSAGE is an identifier for sequence number 6020 6021 self.flags = {'deleted': '\\Deleted', 'draft': '\\Draft', 6022 'flagged': '\\Flagged', 'recent': '\\Recent', 6023 'seen': '\\Seen', 'answered': '\\Answered'} 6024 self.search_fields = { 6025 'id': 'MESSAGE', 'created': 'DATE', 6026 'uid': 'UID', 'sender': 'FROM', 6027 'to': 'TO', 'cc': 'CC', 6028 'bcc': 'BCC', 'content': 'TEXT', 6029 'size': 'SIZE', 'deleted': '\\Deleted', 6030 'draft': '\\Draft', 'flagged': '\\Flagged', 6031 'recent': '\\Recent', 'seen': '\\Seen', 6032 'subject': 'SUBJECT', 'answered': '\\Answered', 6033 'mime': None, 'email': None, 6034 'attachments': None 6035 } 6036 6037 db['_lastsql'] = '' 6038 6039 m = self.REGEX_URI.match(uri) 6040 user = m.group('user') 6041 password = m.group('password') 6042 host = m.group('host') 6043 port = int(m.group('port')) 6044 over_ssl = False 6045 if port==993: 6046 over_ssl = True 6047 6048 driver_args.update(host=host,port=port, password=password, user=user) 6049 def connector(driver_args=driver_args): 6050 # successful authentication is always assumed 6051 # TODO: support direct connection and login tests 6052 if over_ssl: 6053 self.imap4 = self.driver.IMAP4_SSL 6054 else: 6055 self.imap4 = self.driver.IMAP4 6056 connection = self.imap4(driver_args["host"], driver_args["port"]) 6057 data = connection.login(driver_args["user"], driver_args["password"]) 6058 6059 # static mailbox list 6060 connection.mailbox_names = None 6061 6062 # dummy cursor function 6063 connection.cursor = lambda : True 6064 6065 return connection
6066 6067 self.db.define_tables = self.define_tables 6068 self.connector = connector 6069 if do_connect: self.reconnect()
6070
6071 - def reconnect(self, f=None, cursor=True):
6072 """ 6073 IMAP4 Pool connection method 6074 6075 imap connection lacks of self cursor command. 6076 A custom command should be provided as a replacement 6077 for connection pooling to prevent uncaught remote session 6078 closing 6079 6080 """ 6081 if getattr(self,'connection',None) != None: 6082 return 6083 if f is None: 6084 f = self.connector 6085 6086 if not self.pool_size: 6087 self.connection = f() 6088 self.cursor = cursor and self.connection.cursor() 6089 else: 6090 POOLS = ConnectionPool.POOLS 6091 uri = self.uri 6092 while True: 6093 GLOBAL_LOCKER.acquire() 6094 if not uri in POOLS: 6095 POOLS[uri] = [] 6096 if POOLS[uri]: 6097 self.connection = POOLS[uri].pop() 6098 GLOBAL_LOCKER.release() 6099 self.cursor = cursor and self.connection.cursor() 6100 if self.cursor and self.check_active_connection: 6101 try: 6102 # check if connection is alive or close it 6103 result, data = self.connection.list() 6104 except: 6105 # Possible connection reset error 6106 # TODO: read exception class 6107 self.connection = f() 6108 break 6109 else: 6110 GLOBAL_LOCKER.release() 6111 self.connection = f() 6112 self.cursor = cursor and self.connection.cursor() 6113 break 6114 self.after_connection_hook()
6115
6116 - def get_last_message(self, tablename):
6117 last_message = None 6118 # request the mailbox list from the server 6119 # if needed 6120 if not isinstance(self.connection.mailbox_names, dict): 6121 self.get_mailboxes() 6122 try: 6123 result = self.connection.select(self.connection.mailbox_names[tablename]) 6124 last_message = int(result[1][0]) 6125 except (IndexError, ValueError, TypeError, KeyError): 6126 e = sys.exc_info()[1] 6127 LOGGER.debug("Error retrieving the last mailbox sequence number. %s" % str(e)) 6128 return last_message
6129
6130 - def get_uid_bounds(self, tablename):
6131 if not isinstance(self.connection.mailbox_names, dict): 6132 self.get_mailboxes() 6133 # fetch first and last messages 6134 # return (first, last) messages uid's 6135 last_message = self.get_last_message(tablename) 6136 result, data = self.connection.uid("search", None, "(ALL)") 6137 uid_list = data[0].strip().split() 6138 if len(uid_list) <= 0: 6139 return None 6140 else: 6141 return (uid_list[0], uid_list[-1])
6142
6143 - def convert_date(self, date, add=None, imf=False):
6144 """ Convert a date object to a string 6145 with d-Mon-Y style for IMAP, or the inverse 6146 case 6147 6148 add <timedelta> adds to the date object 6149 """ 6150 if add is None: 6151 add = datetime.timedelta() 6152 months = [None, "JAN","FEB","MAR","APR","MAY","JUN", 6153 "JUL", "AUG","SEP","OCT","NOV","DEC"] 6154 if isinstance(date, basestring): 6155 # Prevent unexpected date response format 6156 try: 6157 if "," in date: 6158 dayname, datestring = date.split(",") 6159 else: 6160 dayname, datestring = None, date 6161 date_list = datestring.strip().split() 6162 year = int(date_list[2]) 6163 month = months.index(date_list[1].upper()) 6164 day = int(date_list[0]) 6165 hms = map(int, date_list[3].split(":")) 6166 return datetime.datetime(year, month, day, 6167 hms[0], hms[1], hms[2]) + add 6168 except (ValueError, AttributeError, IndexError), e: 6169 LOGGER.error("Could not parse date text: %s. %s" % 6170 (date, e)) 6171 return None 6172 elif isinstance(date, (datetime.date, datetime.datetime)): 6173 if imf: date_format = "%a, %d %b %Y %H:%M:%S %z" 6174 else: date_format = "%d-%b-%Y" 6175 return (date + add).strftime(date_format) 6176 else: 6177 return None
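# Conversion sketch in both directions (example values; month names assume
# the default C locale):
#
# >>> adapter.convert_date("Tue, 31 Jan 2012 12:00:00")
# datetime.datetime(2012, 1, 31, 12, 0)
# >>> adapter.convert_date(datetime.date(2012, 1, 31))
# '31-Jan-2012'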
6178 6179 @staticmethod
6180 - def header_represent(f, r):
6181 from email.header import decode_header 6182 text, encoding = decode_header(f)[0] 6183 if encoding: 6184 text = text.decode(encoding).encode('utf-8') 6185 return text
6186
6187 - def encode_text(self, text, charset, errors="replace"):
6188 """ convert text for mail to unicode""" 6189 if text is None: 6190 text = "" 6191 else: 6192 if isinstance(text, str): 6193 if charset is None: 6194 text = unicode(text, "utf-8", errors) 6195 else: 6196 text = unicode(text, charset, errors) 6197 else: 6198 raise Exception("Unsupported mail text type %s" % type(text)) 6199 return text.encode("utf-8")
6200
6201 - def get_charset(self, message):
6202 charset = message.get_content_charset() 6203 return charset
6204
6205 - def get_mailboxes(self):
6206 """ Query the mail database for mailbox names """ 6207 if self.static_names: 6208 # statically defined mailbox names 6209 self.connection.mailbox_names = self.static_names 6210 return self.static_names.keys() 6211 6212 mailboxes_list = self.connection.list() 6213 self.connection.mailbox_names = dict() 6214 mailboxes = list() 6215 x = 0 6216 for item in mailboxes_list[1]: 6217 x = x + 1 6218 item = item.strip() 6219 if not "NOSELECT" in item.upper(): 6220 sub_items = item.split("\"") 6221 sub_items = [sub_item for sub_item in sub_items \ 6222 if len(sub_item.strip()) > 0] 6223 # mailbox = sub_items[len(sub_items) -1] 6224 mailbox = sub_items[-1] 6225 # remove unwanted characters and store original names 6226 # Don't allow leading non alphabetic characters 6227 mailbox_name = re.sub('^[_0-9]*', '', re.sub('[^_\w]','',re.sub('[/ ]','_',mailbox))) 6228 mailboxes.append(mailbox_name) 6229 self.connection.mailbox_names[mailbox_name] = mailbox 6230 6231 return mailboxes
6232
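# Name-mapping sketch: native mailbox names are sanitized into valid table
# names; for example a server mailbox '[Gmail]/Sent Mail' would be exposed
# as the table name 'Gmail_Sent_Mail', with the original name kept in
# connection.mailbox_names['Gmail_Sent_Mail'].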
6233 - def get_query_mailbox(self, query):
6234 nofield = True 6235 tablename = None 6236 attr = query 6237 while nofield: 6238 if hasattr(attr, "first"): 6239 attr = attr.first 6240 if isinstance(attr, Field): 6241 return attr.tablename 6242 elif isinstance(attr, Query): 6243 pass 6244 else: 6245 return None 6246 else: 6247 return None 6248 return tablename
6249
6250 - def is_flag(self, flag):
6251 if self.search_fields.get(flag, None) in self.flags.values(): 6252 return True 6253 else: 6254 return False
6255
6256 - def define_tables(self, mailbox_names=None):
6257 """ 6258 Auto create common IMAP fileds 6259 6260 This function creates fields definitions "statically" 6261 meaning that custom fields as in other adapters should 6262 not be supported and definitions handled on a service/mode 6263 basis (local syntax for Gmail(r), Ymail(r) 6264 6265 Returns a dictionary with tablename, server native mailbox name 6266 pairs. 6267 """ 6268 if mailbox_names: 6269 # optional statically declared mailboxes 6270 self.static_names = mailbox_names 6271 else: 6272 self.static_names = None 6273 if not isinstance(self.connection.mailbox_names, dict): 6274 self.get_mailboxes() 6275 6276 names = self.connection.mailbox_names.keys() 6277 6278 for name in names: 6279 self.db.define_table("%s" % name, 6280 Field("uid", "string", writable=False), 6281 Field("answered", "boolean"), 6282 Field("created", "datetime", writable=False), 6283 Field("content", list, writable=False), 6284 Field("to", "string", writable=False), 6285 Field("cc", "string", writable=False), 6286 Field("bcc", "string", writable=False), 6287 Field("size", "integer", writable=False), 6288 Field("deleted", "boolean"), 6289 Field("draft", "boolean"), 6290 Field("flagged", "boolean"), 6291 Field("sender", "string", writable=False), 6292 Field("recent", "boolean", writable=False), 6293 Field("seen", "boolean"), 6294 Field("subject", "string", writable=False), 6295 Field("mime", "string", writable=False), 6296 Field("email", "string", writable=False, readable=False), 6297 Field("attachments", list, writable=False, readable=False), 6298 Field("encoding", writable=False) 6299 ) 6300 6301 # Set a special _mailbox attribute for storing 6302 # native mailbox names 6303 self.db[name].mailbox = \ 6304 self.connection.mailbox_names[name] 6305 6306 # decode quoted printable 6307 self.db[name].to.represent = self.db[name].cc.represent = \ 6308 self.db[name].bcc.represent = self.db[name].sender.represent = \ 6309 self.db[name].subject.represent = self.header_represent 6310 6311 # Set the db instance mailbox collections 6312 self.db.mailboxes = self.connection.mailbox_names 6313 return self.db.mailboxes
6314
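# Usage sketch: declare the mailboxes statically to skip the server LIST
# round-trip, then query the generated table (connection values assumed):
#
# >>> imapdb.define_tables({'inbox': 'INBOX'})
# >>> unseen = imapdb(imapdb.inbox.seen == False).count()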
6315 - def create_table(self, *args, **kwargs):
6316 # not implemented 6317 # but required by DAL 6318 pass
6319
6320 - def _select(self, query, fields, attributes):
6321 if use_common_filters(query): 6322 query = self.common_filter(query, [self.get_query_mailbox(query),]) 6323 return str(query)
6324
6325 - def select(self, query, fields, attributes):
6326 """ Search and Fetch records and return web2py rows 6327 """ 6328 # move this statement elsewhere (upper-level) 6329 if use_common_filters(query): 6330 query = self.common_filter(query, [self.get_query_mailbox(query),]) 6331 6332 import email 6333 # get records from imap server with search + fetch 6334 # convert results to a dictionary 6335 tablename = None 6336 fetch_results = list() 6337 6338 if isinstance(query, Query): 6339 tablename = self.get_table(query) 6340 mailbox = self.connection.mailbox_names.get(tablename, None) 6341 if mailbox is None: 6342 raise ValueError("Mailbox name not found: %s" % mailbox) 6343 else: 6344 # select with readonly 6345 result, selected = self.connection.select(mailbox, True) 6346 if result != "OK": 6347 raise Exception("IMAP error: %s" % selected) 6348 self.mailbox_size = int(selected[0]) 6349 search_query = "(%s)" % str(query).strip() 6350 search_result = self.connection.uid("search", None, search_query) 6351 # Normal IMAP response OK is assumed (change this) 6352 if search_result[0] == "OK": 6353 # For "light" remote server responses just get the first 6354 # ten records (change for non-experimental implementation) 6355 # However, light responses are not guaranteed with this 6356 # approach, just fewer messages. 6357 limitby = attributes.get('limitby', None) 6358 messages_set = search_result[1][0].split() 6359 # descending order 6360 messages_set.reverse() 6361 if limitby is not None: 6362 # TODO: orderby, asc/desc, limitby from complete message set 6363 messages_set = messages_set[int(limitby[0]):int(limitby[1])] 6364 6365 # keep the requests small for header/flags 6366 if any([(field.name in ["content", "size", 6367 "attachments", "email"]) for 6368 field in fields]): 6369 imap_fields = "(RFC822 FLAGS)" 6370 else: 6371 imap_fields = "(RFC822.HEADER FLAGS)" 6372 6373 if len(messages_set) > 0: 6374 # create fetch results object list 6375 # fetch each remote message and store it in memmory 6376 # (change to multi-fetch command syntax for faster 6377 # transactions) 6378 for uid in messages_set: 6379 # fetch the RFC822 message body 6380 typ, data = self.connection.uid("fetch", uid, imap_fields) 6381 if typ == "OK": 6382 fr = {"message": int(data[0][0].split()[0]), 6383 "uid": long(uid), 6384 "email": email.message_from_string(data[0][1]), 6385 "raw_message": data[0][1]} 6386 fr["multipart"] = fr["email"].is_multipart() 6387 # fetch flags for the message 6388 fr["flags"] = self.driver.ParseFlags(data[1]) 6389 fetch_results.append(fr) 6390 else: 6391 # error retrieving the message body 6392 raise Exception("IMAP error retrieving the body: %s" % data) 6393 else: 6394 raise Exception("IMAP search error: %s" % search_result[1]) 6395 elif isinstance(query, (Expression, basestring)): 6396 raise NotImplementedError() 6397 else: 6398 raise TypeError("Unexpected query type") 6399 6400 imapqry_dict = {} 6401 imapfields_dict = {} 6402 6403 if len(fields) == 1 and isinstance(fields[0], SQLALL): 6404 allfields = True 6405 elif len(fields) == 0: 6406 allfields = True 6407 else: 6408 allfields = False 6409 if allfields: 6410 colnames = ["%s.%s" % (tablename, field) for field in self.search_fields.keys()] 6411 else: 6412 colnames = ["%s.%s" % (tablename, field.name) for field in fields] 6413 6414 for k in colnames: 6415 imapfields_dict[k] = k 6416 6417 imapqry_list = list() 6418 imapqry_array = list() 6419 for fr in fetch_results: 6420 attachments = [] 6421 content = [] 6422 size = 0 6423 n = int(fr["message"]) 6424 item_dict = dict() 6425 message = fr["email"] 6426 
uid = fr["uid"] 6427 charset = self.get_charset(message) 6428 flags = fr["flags"] 6429 raw_message = fr["raw_message"] 6430 # Return messages data mapping static fields 6431 # and fetched results. Mapping should be made 6432 # outside the select function (with auxiliary 6433 # instance methods) 6434 6435 # pending: search flags states trough the email message 6436 # instances for correct output 6437 6438 # preserve subject encoding (ASCII/quoted printable) 6439 6440 if "%s.id" % tablename in colnames: 6441 item_dict["%s.id" % tablename] = n 6442 if "%s.created" % tablename in colnames: 6443 item_dict["%s.created" % tablename] = self.convert_date(message["Date"]) 6444 if "%s.uid" % tablename in colnames: 6445 item_dict["%s.uid" % tablename] = uid 6446 if "%s.sender" % tablename in colnames: 6447 # If there is no encoding found in the message header 6448 # force utf-8 replacing characters (change this to 6449 # module's defaults). Applies to .sender, .to, .cc and .bcc fields 6450 item_dict["%s.sender" % tablename] = message["From"] 6451 if "%s.to" % tablename in colnames: 6452 item_dict["%s.to" % tablename] = message["To"] 6453 if "%s.cc" % tablename in colnames: 6454 if "Cc" in message.keys(): 6455 item_dict["%s.cc" % tablename] = message["Cc"] 6456 else: 6457 item_dict["%s.cc" % tablename] = "" 6458 if "%s.bcc" % tablename in colnames: 6459 if "Bcc" in message.keys(): 6460 item_dict["%s.bcc" % tablename] = message["Bcc"] 6461 else: 6462 item_dict["%s.bcc" % tablename] = "" 6463 if "%s.deleted" % tablename in colnames: 6464 item_dict["%s.deleted" % tablename] = "\\Deleted" in flags 6465 if "%s.draft" % tablename in colnames: 6466 item_dict["%s.draft" % tablename] = "\\Draft" in flags 6467 if "%s.flagged" % tablename in colnames: 6468 item_dict["%s.flagged" % tablename] = "\\Flagged" in flags 6469 if "%s.recent" % tablename in colnames: 6470 item_dict["%s.recent" % tablename] = "\\Recent" in flags 6471 if "%s.seen" % tablename in colnames: 6472 item_dict["%s.seen" % tablename] = "\\Seen" in flags 6473 if "%s.subject" % tablename in colnames: 6474 item_dict["%s.subject" % tablename] = message["Subject"] 6475 if "%s.answered" % tablename in colnames: 6476 item_dict["%s.answered" % tablename] = "\\Answered" in flags 6477 if "%s.mime" % tablename in colnames: 6478 item_dict["%s.mime" % tablename] = message.get_content_type() 6479 if "%s.encoding" % tablename in colnames: 6480 item_dict["%s.encoding" % tablename] = charset 6481 6482 # Here goes the whole RFC822 body as an email instance 6483 # for controller side custom processing 6484 # The message is stored as a raw string 6485 # >> email.message_from_string(raw string) 6486 # returns a Message object for enhanced object processing 6487 if "%s.email" % tablename in colnames: 6488 # WARNING: no encoding performed (raw message) 6489 item_dict["%s.email" % tablename] = raw_message 6490 6491 # Size measure as suggested in a Velocity Reviews post 6492 # by Tim Williams: "how to get size of email attachment" 6493 # Note: len() and server RFC822.SIZE reports doesn't match 6494 # To retrieve the server size for representation would add a new 6495 # fetch transaction to the process 6496 for part in message.walk(): 6497 maintype = part.get_content_maintype() 6498 if ("%s.attachments" % tablename in colnames) or \ 6499 ("%s.content" % tablename in colnames): 6500 payload = part.get_payload(decode=True) 6501 if payload: 6502 filename = part.get_filename() 6503 values = {"mime": part.get_content_type()} 6504 if ((filename or not "text" in maintype) and 6505 
("%s.attachments" % tablename in colnames)): 6506 values.update({"payload": payload, 6507 "filename": filename, 6508 "encoding": part.get_content_charset(), 6509 "disposition": part["Content-Disposition"]}) 6510 attachments.append(values) 6511 elif (("text" in maintype) and 6512 ("%s.content" % tablename in colnames)): 6513 values.update({"text": self.encode_text(payload, 6514 self.get_charset(part))}) 6515 content.append(values) 6516 6517 if "%s.size" % tablename in colnames: 6518 if part is not None: 6519 size += len(str(part)) 6520 item_dict["%s.content" % tablename] = content 6521 item_dict["%s.attachments" % tablename] = attachments 6522 item_dict["%s.size" % tablename] = size 6523 imapqry_list.append(item_dict) 6524 6525 # extra object mapping for the sake of rows object 6526 # creation (sends an array or lists) 6527 for item_dict in imapqry_list: 6528 imapqry_array_item = list() 6529 for fieldname in colnames: 6530 imapqry_array_item.append(item_dict[fieldname]) 6531 imapqry_array.append(imapqry_array_item) 6532 6533 # parse result and return a rows object 6534 colnames = colnames 6535 processor = attributes.get('processor',self.parse) 6536 return processor(imapqry_array, fields, colnames)
6537
6538 - def _insert(self, table, fields):
6539 def add_payload(message, obj): 6540 payload = Message() 6541 encoding = obj.get("encoding", "utf-8") 6542 if encoding and (encoding.upper() in 6543 ("BASE64", "7BIT", "8BIT", "BINARY")): 6544 payload.add_header("Content-Transfer-Encoding", encoding) 6545 else: 6546 payload.set_charset(encoding) 6547 mime = obj.get("mime", None) 6548 if mime: 6549 payload.set_type(mime) 6550 if "text" in obj: 6551 payload.set_payload(obj["text"]) 6552 elif "payload" in obj: 6553 payload.set_payload(obj["payload"]) 6554 if "filename" in obj and obj["filename"]: 6555 payload.add_header("Content-Disposition", 6556 "attachment", filename=obj["filename"]) 6557 message.attach(payload)
6558 6559 mailbox = table.mailbox 6560 d = dict(((k.name, v) for k, v in fields)) 6561 date_time = d.get("created", datetime.datetime.now()) 6562 struct_time = date_time.timetuple() 6563 if len(d) > 0: 6564 message = d.get("email", None) 6565 attachments = d.get("attachments", []) 6566 content = d.get("content", []) 6567 flags = " ".join(["\\%s" % flag.capitalize() for flag in 6568 ("answered", "deleted", "draft", "flagged", 6569 "recent", "seen") if d.get(flag, False)]) 6570 if not message: 6571 from email.message import Message 6572 mime = d.get("mime", None) 6573 charset = d.get("encoding", None) 6574 message = Message() 6575 message["from"] = d.get("sender", "") 6576 message["subject"] = d.get("subject", "") 6577 message["date"] = self.convert_date(date_time, imf=True) 6578 6579 if mime: 6580 message.set_type(mime) 6581 if charset: 6582 message.set_charset(charset) 6583 for item in ("to", "cc", "bcc"): 6584 value = d.get(item, "") 6585 if isinstance(value, basestring): 6586 message[item] = value 6587 else: 6588 message[item] = ";".join([i for i in 6589 value]) 6590 if (not message.is_multipart() and 6591 (not message.get_content_type().startswith( 6592 "multipart"))): 6593 if isinstance(content, basestring): 6594 message.set_payload(content) 6595 elif len(content) > 0: 6596 message.set_payload(content[0]["text"]) 6597 else: 6598 [add_payload(message, c) for c in content] 6599 [add_payload(message, a) for a in attachments] 6600 message = message.as_string() 6601 return (mailbox, flags, struct_time, message) 6602 else: 6603 raise NotImplementedError("IMAP empty insert is not implemented") 6604
6605 - def insert(self, table, fields):
6606 values = self._insert(table, fields) 6607 result, data = self.connection.append(*values) 6608 if result == "OK": 6609 uid = int(re.findall("\d+", str(data))[-1]) 6610 return self.db(table.uid==uid).select(table.id).first().id 6611 else: 6612 raise Exception("IMAP message append failed: %s" % data)
6613
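For illustration, a minimal insert sketch through the IMAP adapter; the account, host, port and the INBOX mailbox below are assumptions, and the adapter's define_tables() method (which maps mailboxes to tables) is reached through the adapter object:

>>> imapdb = DAL('imap://user:password@mail.example.com:993', pool_size=1)  ### hypothetical account
>>> imapdb._adapter.define_tables()  ### maps each mailbox (e.g. INBOX) to a table
>>> mid = imapdb.INBOX.insert(to='someone@example.com', subject='hello',
...     content='hello world', seen=False)  ### appends a message, returns its id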
6614 - def _update(self, tablename, query, fields, commit=False):
6615 # TODO: the adapter should implement an .expand method 6616 commands = list() 6617 if use_common_filters(query): 6618 query = self.common_filter(query, [tablename,]) 6619 mark = [] 6620 unmark = [] 6621 if query: 6622 for item in fields: 6623 field = item[0] 6624 name = field.name 6625 value = item[1] 6626 if self.is_flag(name): 6627 flag = self.search_fields[name] 6628 if (value is not None) and (flag != "\\Recent"): 6629 if value: 6630 mark.append(flag) 6631 else: 6632 unmark.append(flag) 6633 result, data = self.connection.select( 6634 self.connection.mailbox_names[tablename]) 6635 string_query = "(%s)" % query 6636 result, data = self.connection.search(None, string_query) 6637 store_list = [item.strip() for item in data[0].split() 6638 if item.strip().isdigit()] 6639 # build commands for marked flags 6640 for number in store_list: 6641 result = None 6642 if len(mark) > 0: 6643 commands.append((number, "+FLAGS", "(%s)" % " ".join(mark))) 6644 if len(unmark) > 0: 6645 commands.append((number, "-FLAGS", "(%s)" % " ".join(unmark))) 6646 return commands
6647
6648 - def update(self, tablename, query, fields):
6649 rowcount = 0 6650 commands = self._update(tablename, query, fields) 6651 for command in commands: 6652 result, data = self.connection.store(*command) 6653 if result == "OK": 6654 rowcount += 1 6655 else: 6656 raise Exception("IMAP storing error: %s" % data) 6657 return rowcount
6658
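Because only flag fields can be written back to an IMAP server, update() is limited to toggling flags; a sketch, reusing the hypothetical connection above, that marks every unseen INBOX message as seen:

>>> imapdb(imapdb.INBOX.seen == False).update(seen=True)  ### returns how many flag STORE operations succeeded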
6659 - def _count(self, query, distinct=None):
6660 raise NotImplementedError()
6661
6662 - def count(self,query,distinct=None):
6663 counter = 0 6664 tablename = self.get_query_mailbox(query) 6665 if query and tablename is not None: 6666 if use_common_filters(query): 6667 query = self.common_filter(query, [tablename,]) 6668 result, data = self.connection.select(self.connection.mailbox_names[tablename]) 6669 string_query = "(%s)" % query 6670 result, data = self.connection.search(None, string_query) 6671 store_list = [item.strip() for item in data[0].split() if item.strip().isdigit()] 6672 counter = len(store_list) 6673 return counter
6674
6675 - def delete(self, tablename, query):
6676 counter = 0 6677 if query: 6678 if use_common_filters(query): 6679 query = self.common_filter(query, [tablename,]) 6680 result, data = self.connection.select(self.connection.mailbox_names[tablename]) 6681 string_query = "(%s)" % query 6682 result, data = self.connection.search(None, string_query) 6683 store_list = [item.strip() for item in data[0].split() if item.strip().isdigit()] 6684 for number in store_list: 6685 result, data = self.connection.store(number, "+FLAGS", "(\\Deleted)") 6686 if result == "OK": 6687 counter += 1 6688 else: 6689 raise Exception("IMAP store error: %s" % data) 6690 if counter > 0: 6691 result, data = self.connection.expunge() 6692 return counter
6693
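A deletion sketch: delete() first flags every matched message \Deleted and then expunges the mailbox, so the removal is permanent (the cutoff date below is a placeholder):

>>> import datetime
>>> imapdb(imapdb.INBOX.created < datetime.date(2012, 1, 1)).delete()  ### count of flagged and expunged messages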
6694 - def BELONGS(self, first, second):
6695 result = None 6696 name = self.search_fields[first.name] 6697 if name == "MESSAGE": 6698 values = [str(val) for val in second if str(val).isdigit()] 6699 result = "%s" % ",".join(values).strip() 6700 6701 elif name == "UID": 6702 values = [str(val) for val in second if str(val).isdigit()] 6703 result = "UID %s" % ",".join(values).strip() 6704 6705 else: 6706 raise Exception("Operation not supported") 6707 # result = "(%s %s)" % (self.expand(first), self.expand(second)) 6708 return result
6709
6710 - def CONTAINS(self, first, second, case_sensitive=False):
6711 # the case_sensitive flag is silently ignored (IMAP SEARCH matching is case insensitive) 6712 result = None 6713 name = self.search_fields[first.name] 6714 6715 if name in ("FROM", "TO", "SUBJECT", "TEXT"): 6716 result = "%s \"%s\"" % (name, self.expand(second)) 6717 else: 6718 if first.name in ("cc", "bcc"): 6719 result = "%s \"%s\"" % (first.name.upper(), self.expand(second)) 6720 elif first.name == "mime": 6721 result = "HEADER Content-Type \"%s\"" % self.expand(second) 6722 else: 6723 raise Exception("Operation not supported") 6724 return result
6725
6726 - def GT(self, first, second):
6727 result = None 6728 name = self.search_fields[first.name] 6729 if name == "MESSAGE": 6730 last_message = self.get_last_message(first.tablename) 6731 result = "%d:%d" % (int(self.expand(second)) + 1, last_message) 6732 elif name == "UID": 6733 # GT and LT may not return 6734 # expected sets depending on 6735 # the uid format implemented 6736 try: 6737 pedestal, threshold = self.get_uid_bounds(first.tablename) 6738 except TypeError: 6739 e = sys.exc_info()[1] 6740 LOGGER.debug("Error requesting uid bounds: %s", str(e)) 6741 return "" 6742 try: 6743 lower_limit = int(self.expand(second)) + 1 6744 except (ValueError, TypeError): 6745 e = sys.exc_info()[1] 6746 raise Exception("Operation not supported (non integer UID)") 6747 result = "UID %s:%s" % (lower_limit, threshold) 6748 elif name == "DATE": 6749 result = "SINCE %s" % self.convert_date(second, add=datetime.timedelta(1)) 6750 elif name == "SIZE": 6751 result = "LARGER %s" % self.expand(second) 6752 else: 6753 raise Exception("Operation not supported") 6754 return result
6755
6756 - def GE(self, first, second):
6757 result = None 6758 name = self.search_fields[first.name] 6759 if name == "MESSAGE": 6760 last_message = self.get_last_message(first.tablename) 6761 result = "%s:%s" % (self.expand(second), last_message) 6762 elif name == "UID": 6763 # GT and LT may not return 6764 # expected sets depending on 6765 # the uid format implemented 6766 try: 6767 pedestal, threshold = self.get_uid_bounds(first.tablename) 6768 except TypeError: 6769 e = sys.exc_info()[1] 6770 LOGGER.debug("Error requesting uid bounds: %s", str(e)) 6771 return "" 6772 lower_limit = self.expand(second) 6773 result = "UID %s:%s" % (lower_limit, threshold) 6774 elif name == "DATE": 6775 result = "SINCE %s" % self.convert_date(second) 6776 else: 6777 raise Exception("Operation not supported") 6778 return result
6779
6780 - def LT(self, first, second):
6781 result = None 6782 name = self.search_fields[first.name] 6783 if name == "MESSAGE": 6784 result = "%s:%s" % (1, int(self.expand(second)) - 1) 6785 elif name == "UID": 6786 try: 6787 pedestal, threshold = self.get_uid_bounds(first.tablename) 6788 except TypeError: 6789 e = sys.exc_info()[1] 6790 LOGGER.debug("Error requesting uid bounds: %s", str(e)) 6791 return "" 6792 try: 6793 upper_limit = int(self.expand(second)) - 1 6794 except (ValueError, TypeError): 6795 e = sys.exc_info()[1] 6796 raise Exception("Operation not supported (non integer UID)") 6797 result = "UID %s:%s" % (pedestal, upper_limit) 6798 elif name == "DATE": 6799 result = "BEFORE %s" % self.convert_date(second) 6800 elif name == "SIZE": 6801 result = "SMALLER %s" % self.expand(second) 6802 else: 6803 raise Exception("Operation not supported") 6804 return result
6805
6806 - def LE(self, first, second):
6807 result = None 6808 name = self.search_fields[first.name] 6809 if name == "MESSAGE": 6810 result = "%s:%s" % (1, self.expand(second)) 6811 elif name == "UID": 6812 try: 6813 pedestal, threshold = self.get_uid_bounds(first.tablename) 6814 except TypeError: 6815 e = sys.exc_info()[1] 6816 LOGGER.debug("Error requesting uid bounds: %s", str(e)) 6817 return "" 6818 upper_limit = int(self.expand(second)) 6819 result = "UID %s:%s" % (pedestal, upper_limit) 6820 elif name == "DATE": 6821 result = "BEFORE %s" % self.convert_date(second, add=datetime.timedelta(1)) 6822 else: 6823 raise Exception("Operation not supported") 6824 return result
6825
6826 - def NE(self, first, second=None):
6827 if (second is None) and isinstance(first, Field): 6828 # All records special table query 6829 if first.type == "id": 6830 return self.GE(first, 1) 6831 result = self.NOT(self.EQ(first, second)) 6832 result = result.replace("NOT NOT", "").strip() 6833 return result
6834
6835 - def EQ(self,first,second):
6836 name = self.search_fields[first.name] 6837 result = None 6838 if name is not None: 6839 if name == "MESSAGE": 6840 # query by message sequence number 6841 result = "%s" % self.expand(second) 6842 elif name == "UID": 6843 result = "UID %s" % self.expand(second) 6844 elif name == "DATE": 6845 result = "ON %s" % self.convert_date(second) 6846 6847 elif name in self.flags.values(): 6848 if second: 6849 result = "%s" % (name.upper()[1:]) 6850 else: 6851 result = "NOT %s" % (name.upper()[1:]) 6852 else: 6853 raise Exception("Operation not supported") 6854 else: 6855 raise Exception("Operation not supported") 6856 return result
6857
6858 - def AND(self, first, second):
6859 result = "%s %s" % (self.expand(first), self.expand(second)) 6860 return result
6861
6862 - def OR(self, first, second):
6863 result = "OR %s %s" % (self.expand(first), self.expand(second)) 6864 return "%s" % result.replace("OR OR", "OR")
6865
6866 - def NOT(self, first):
6867 result = "NOT %s" % self.expand(first) 6868 return result
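Taken together, these operator methods expand a DAL query into an IMAP SEARCH criteria string instead of SQL; a sketch of how a compound query translates (reusing the hypothetical connection above):

>>> q = (imapdb.INBOX.seen == False) & (imapdb.INBOX.subject.contains('report'))
### EQ yields 'NOT SEEN', CONTAINS yields 'SUBJECT "report"', and AND simply
### juxtaposes them, so the server receives: NOT SEEN SUBJECT "report"
>>> rows = imapdb(q).select()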
6869 6870 ######################################################################## 6871 # end of adapters 6872 ######################################################################## 6873 6874 ADAPTERS = { 6875 'sqlite': SQLiteAdapter, 6876 'spatialite': SpatiaLiteAdapter, 6877 'sqlite:memory': SQLiteAdapter, 6878 'spatialite:memory': SpatiaLiteAdapter, 6879 'mysql': MySQLAdapter, 6880 'postgres': PostgreSQLAdapter, 6881 'postgres:psycopg2': PostgreSQLAdapter, 6882 'postgres:pg8000': PostgreSQLAdapter, 6883 'postgres2:psycopg2': NewPostgreSQLAdapter, 6884 'postgres2:pg8000': NewPostgreSQLAdapter, 6885 'oracle': OracleAdapter, 6886 'mssql': MSSQLAdapter, 6887 'mssql2': MSSQL2Adapter, 6888 'mssql3': MSSQL3Adapter, 6889 'mssql4' : MSSQL4Adapter, 6890 'vertica': VerticaAdapter, 6891 'sybase': SybaseAdapter, 6892 'db2': DB2Adapter, 6893 'teradata': TeradataAdapter, 6894 'informix': InformixAdapter, 6895 'informix-se': InformixSEAdapter, 6896 'firebird': FireBirdAdapter, 6897 'firebird_embedded': FireBirdAdapter, 6898 'ingres': IngresAdapter, 6899 'ingresu': IngresUnicodeAdapter, 6900 'sapdb': SAPDBAdapter, 6901 'cubrid': CubridAdapter, 6902 'jdbc:sqlite': JDBCSQLiteAdapter, 6903 'jdbc:sqlite:memory': JDBCSQLiteAdapter, 6904 'jdbc:postgres': JDBCPostgreSQLAdapter, 6905 'gae': GoogleDatastoreAdapter, # discouraged, for backward compatibility 6906 'google:datastore': GoogleDatastoreAdapter, 6907 'google:sql': GoogleSQLAdapter, 6908 'couchdb': CouchDBAdapter, 6909 'mongodb': MongoDBAdapter, 6910 'imap': IMAPAdapter 6911 }
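The adapter is chosen by looking up the URI prefix (the database name plus an optional driver, everything before '://') in this dictionary; for example (credentials and database names are placeholders):

>>> db = DAL('postgres:psycopg2://user:password@localhost/mydb')  ### -> PostgreSQLAdapter via psycopg2
>>> nosql = DAL('mongodb://localhost/mydb')                       ### -> MongoDBAdapter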
6912 6913 -def sqlhtml_validators(field):
6914 """ 6915 Field type validation, using web2py's validators mechanism. 6916 6917 makes sure the content of a field is in line with the declared 6918 fieldtype 6919 """ 6920 db = field.db 6921 try: 6922 from gluon import validators 6923 except ImportError: 6924 return [] 6925 field_type, field_length = field.type, field.length 6926 if isinstance(field_type, SQLCustomType): 6927 if hasattr(field_type, 'validator'): 6928 return field_type.validator 6929 else: 6930 field_type = field_type.type 6931 elif not isinstance(field_type,str): 6932 return [] 6933 requires=[] 6934 def ff(r,id): 6935 row=r(id) 6936 if not row: 6937 return id 6938 elif hasattr(r, '_format') and isinstance(r._format,str): 6939 return r._format % row 6940 elif hasattr(r, '_format') and callable(r._format): 6941 return r._format(row) 6942 else: 6943 return id
6944 if field_type in (('string', 'text', 'password')): 6945 requires.append(validators.IS_LENGTH(field_length)) 6946 elif field_type == 'json': 6947 requires.append(validators.IS_EMPTY_OR(validators.IS_JSON(native_json=field.db._adapter.native_json))) 6948 elif field_type == 'double' or field_type == 'float': 6949 requires.append(validators.IS_FLOAT_IN_RANGE(-1e100, 1e100)) 6950 elif field_type == 'integer': 6951 requires.append(validators.IS_INT_IN_RANGE(-2**31, 2**31)) 6952 elif field_type == 'bigint': 6953 requires.append(validators.IS_INT_IN_RANGE(-2**63, 2**63)) 6954 elif field_type.startswith('decimal'): 6955 requires.append(validators.IS_DECIMAL_IN_RANGE(-10**10, 10**10)) 6956 elif field_type == 'date': 6957 requires.append(validators.IS_DATE()) 6958 elif field_type == 'time': 6959 requires.append(validators.IS_TIME()) 6960 elif field_type == 'datetime': 6961 requires.append(validators.IS_DATETIME()) 6962 elif db and field_type.startswith('reference') and \ 6963 field_type.find('.') < 0 and \ 6964 field_type[10:] in db.tables: 6965 referenced = db[field_type[10:]] 6966 def repr_ref(id, row=None, r=referenced, f=ff): return f(r, id) 6967 field.represent = field.represent or repr_ref 6968 if hasattr(referenced, '_format') and referenced._format: 6969 requires = validators.IS_IN_DB(db,referenced._id, 6970 referenced._format) 6971 if field.unique: 6972 requires._and = validators.IS_NOT_IN_DB(db,field) 6973 if field.tablename == field_type[10:]: 6974 return validators.IS_EMPTY_OR(requires) 6975 return requires 6976 elif db and field_type.startswith('list:reference') and \ 6977 field_type.find('.') < 0 and \ 6978 field_type[15:] in db.tables: 6979 referenced = db[field_type[15:]] 6980 def list_ref_repr(ids, row=None, r=referenced, f=ff): 6981 if not ids: 6982 return None 6983 refs = None 6984 db, id = r._db, r._id 6985 if isinstance(db._adapter, GoogleDatastoreAdapter): 6986 def count(values): return db(id.belongs(values)).select(id) 6987 rx = range(0, len(ids), 30) 6988 refs = reduce(lambda a,b:a&b, [count(ids[i:i+30]) for i in rx]) 6989 else: 6990 refs = db(id.belongs(ids)).select(id) 6991 return (refs and ', '.join(f(r,x.id) for x in refs) or '') 6992 field.represent = field.represent or list_ref_repr 6993 if hasattr(referenced, '_format') and referenced._format: 6994 requires = validators.IS_IN_DB(db,referenced._id, 6995 referenced._format,multiple=True) 6996 else: 6997 requires = validators.IS_IN_DB(db,referenced._id, 6998 multiple=True) 6999 if field.unique: 7000 requires._and = validators.IS_NOT_IN_DB(db,field) 7001 if not field.notnull: 7002 requires = validators.IS_EMPTY_OR(requires) 7003 return requires 7004 elif field_type.startswith('list:'): 7005 def repr_list(values,row=None): return', '.join(str(v) for v in (values or [])) 7006 field.represent = field.represent or repr_list 7007 if field.unique: 7008 requires.insert(0,validators.IS_NOT_IN_DB(db,field)) 7009 sff = ['in', 'do', 'da', 'ti', 'de', 'bo'] 7010 if field.notnull and not field_type[:2] in sff: 7011 requires.insert(0, validators.IS_NOT_EMPTY()) 7012 elif not field.notnull and field_type[:2] in sff and requires: 7013 requires[-1] = validators.IS_EMPTY_OR(requires[-1]) 7014 return requires 7015
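The practical effect is that any field whose requires attribute is left at DEFAULT receives validators derived from its type; a sketch (the table and field names are illustrative):

>>> db.define_table('product', Field('qty', 'integer'), Field('price', 'double'))
### db.product.qty.requires becomes [IS_EMPTY_OR(IS_INT_IN_RANGE(-2**31, 2**31))]
### db.product.price.requires becomes [IS_EMPTY_OR(IS_FLOAT_IN_RANGE(-1e100, 1e100))]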
7016 7017 -def bar_escape(item):
7018 return str(item).replace('|', '||')
7019
7020 -def bar_encode(items):
7021 return '|%s|' % '|'.join(bar_escape(item) for item in items if str(item).strip())
7022
7023 -def bar_decode_integer(value):
7024 if not hasattr(value,'split') and hasattr(value,'read'): 7025 value = value.read() 7026 return [long(x) for x in value.split('|') if x.strip()]
7027
7028 -def bar_decode_string(value):
7029 return [x.replace('||', '|') for x in 7030 REGEX_UNPACK.split(value[1:-1]) if x.strip()]
7031
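These helpers pack list: values into a single pipe-delimited string column and unpack it again; a quick round trip:

>>> bar_encode([1, 2, 3])
'|1|2|3|'
>>> bar_decode_integer('|1|2|3|')
[1L, 2L, 3L]
>>> bar_encode(['a|b', 'c'])   ### bar_escape doubles literal pipes
'|a||b|c|'
>>> bar_decode_string('|a||b|c|')
['a|b', 'c']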
7032 7033 -class Row(object):
7034 7035 """ 7036 a dictionary that lets you do d['a'] as well as d.a 7037 this is only used to store a Row 7038 """ 7039 7040 __init__ = lambda self,*args,**kwargs: self.__dict__.update(*args,**kwargs) 7041
7042 - def __getitem__(self, k):
7043 key=str(k) 7044 _extra = self.__dict__.get('_extra', None) 7045 if _extra is not None: 7046 v = _extra.get(key, DEFAULT) 7047 if v != DEFAULT: 7048 return v 7049 m = REGEX_TABLE_DOT_FIELD.match(key) 7050 if m: 7051 try: 7052 return ogetattr(self, m.group(1))[m.group(2)] 7053 except (KeyError,AttributeError,TypeError): 7054 key = m.group(2) 7055 try: 7056 return ogetattr(self, key) 7057 except (KeyError,AttributeError,TypeError), ae: 7058 try: 7059 self[key] = ogetattr(self,'__get_lazy_reference__')(key) 7060 return self[key] 7061 except: 7062 raise ae
7063 7064 __setitem__ = lambda self, key, value: setattr(self, str(key), value) 7065 7066 __delitem__ = object.__delattr__ 7067 7068 __copy__ = lambda self: Row(self) 7069 7070 __call__ = __getitem__ 7071 7072
7073 - def get(self, key, default=None):
7074 try: 7075 return self.__getitem__(key) 7076 except(KeyError, AttributeError, TypeError): 7077 return self.__dict__.get(key,default)
7078 7079 has_key = __contains__ = lambda self, key: key in self.__dict__ 7080 7081 __nonzero__ = lambda self: len(self.__dict__)>0 7082 7083 update = lambda self, *args, **kwargs: self.__dict__.update(*args, **kwargs) 7084 7085 keys = lambda self: self.__dict__.keys() 7086 7087 items = lambda self: self.__dict__.items() 7088 7089 values = lambda self: self.__dict__.values() 7090 7091 __iter__ = lambda self: self.__dict__.__iter__() 7092 7093 iteritems = lambda self: self.__dict__.iteritems() 7094 7095 __str__ = __repr__ = lambda self: '<Row %s>' % self.as_dict() 7096 7097 __int__ = lambda self: object.__getattribute__(self,'id') 7098 7099 __long__ = lambda self: long(object.__getattribute__(self,'id')) 7100 7101 __getattr__ = __getitem__ 7102 7103 # def __getattribute__(self, key): 7104 # try: 7105 # return object.__getattribute__(self, key) 7106 # except AttributeError, ae: 7107 # try: 7108 # return self.__get_lazy_reference__(key) 7109 # except: 7110 # raise ae 7111
7112 - def __eq__(self,other):
7113 try: 7114 return self.as_dict() == other.as_dict() 7115 except AttributeError: 7116 return False
7117
7118 - def __ne__(self,other):
7119 return not (self == other)
7120
7121 - def __copy__(self):
7122 return Row(dict(self))
7123
7124 - def as_dict(self, datetime_to_str=False, custom_types=None):
7125 SERIALIZABLE_TYPES = [str, unicode, int, long, float, bool, list, dict] 7126 if isinstance(custom_types,(list,tuple,set)): 7127 SERIALIZABLE_TYPES += custom_types 7128 elif custom_types: 7129 SERIALIZABLE_TYPES.append(custom_types) 7130 d = dict(self) 7131 for k in copy.copy(d.keys()): 7132 v=d[k] 7133 if d[k] is None: 7134 continue 7135 elif isinstance(v,Row): 7136 d[k]=v.as_dict() 7137 elif isinstance(v,Reference): 7138 d[k]=long(v) 7139 elif isinstance(v,decimal.Decimal): 7140 d[k]=float(v) 7141 elif isinstance(v, (datetime.date, datetime.datetime, datetime.time)): 7142 if datetime_to_str: 7143 d[k] = v.isoformat().replace('T',' ')[:19] 7144 elif not isinstance(v,tuple(SERIALIZABLE_TYPES)): 7145 del d[k] 7146 return d
7147
7148 - def as_xml(self, row_name="row", colnames=None, indent=' '):
7149 def f(row,field,indent=' '): 7150 if isinstance(row,Row): 7151 spc = indent+' \n' 7152 items = [f(row[x],x,indent+' ') for x in row] 7153 return '%s<%s>\n%s\n%s</%s>' % ( 7154 indent, 7155 field, 7156 spc.join(item for item in items if item), 7157 indent, 7158 field) 7159 elif not callable(row): 7160 if REGEX_ALPHANUMERIC.match(field): 7161 return '%s<%s>%s</%s>' % (indent,field,row,field) 7162 else: 7163 return '%s<extra name="%s">%s</extra>' % \ 7164 (indent,field,row) 7165 else: 7166 return None
7167 return f(self, row_name, indent=indent)
7168
7169 - def as_json(self, mode="object", default=None, colnames=None, 7170 serialize=True, **kwargs):
7171 """ 7172 serializes the row to a JSON object 7173 kwargs are passed to .as_dict method 7174 only "object" mode supported 7175 7176 serialize = False used by Rows.as_json 7177 TODO: return array mode with query column order 7178 7179 mode and colnames are not implemented 7180 """ 7181 7182 item = self.as_dict(**kwargs) 7183 if serialize: 7184 if have_serializers: 7185 return serializers.json(item, 7186 default=default or 7187 serializers.custom_json) 7188 elif simplejson: 7189 return simplejson.dumps(item) 7190 else: 7191 raise RuntimeError("missing simplejson") 7192 else: 7193 return item
7194
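In short, a Row behaves as both a dictionary and an object; a small standalone sketch:

>>> r = Row(id=1, name='Jim')
>>> r.name == r['name'] == r('name')   ### attribute, item and call access
True
>>> r.as_dict() == {'id': 1, 'name': 'Jim'}
True
>>> bool(Row())   ### an empty Row is falsy
False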
7195 7196 ################################################################################ 7197 # Everything below should be independent of the specifics of the database 7198 # and should work for RDBMs and some NoSQL databases 7199 ################################################################################ 7200 7201 -class SQLCallableList(list):
7202 - def __call__(self):
7203 return copy.copy(self)
7204
7205 -def smart_query(fields,text):
7206 if not isinstance(fields,(list,tuple)): 7207 fields = [fields] 7208 new_fields = [] 7209 for field in fields: 7210 if isinstance(field,Field): 7211 new_fields.append(field) 7212 elif isinstance(field,Table): 7213 for ofield in field: 7214 new_fields.append(ofield) 7215 else: 7216 raise RuntimeError("fields must be a list of fields") 7217 fields = new_fields 7218 field_map = {} 7219 for field in fields: 7220 n = field.name.lower() 7221 if not n in field_map: 7222 field_map[n] = field 7223 n = str(field).lower() 7224 if not n in field_map: 7225 field_map[n] = field 7226 constants = {} 7227 i = 0 7228 while True: 7229 m = REGEX_CONST_STRING.search(text) 7230 if not m: break 7231 text = text[:m.start()]+('#%i' % i)+text[m.end():] 7232 constants[str(i)] = m.group()[1:-1] 7233 i+=1 7234 text = re.sub('\s+',' ',text).lower() 7235 for a,b in [('&','and'), 7236 ('|','or'), 7237 ('~','not'), 7238 ('==','='), 7239 ('<','<'), 7240 ('>','>'), 7241 ('<=','<='), 7242 ('>=','>='), 7243 ('<>','!='), 7244 ('=<','<='), 7245 ('=>','>='), 7246 ('=','='), 7247 (' less or equal than ','<='), 7248 (' greater or equal than ','>='), 7249 (' equal or less than ','<='), 7250 (' equal or greater than ','>='), 7251 (' less or equal ','<='), 7252 (' greater or equal ','>='), 7253 (' equal or less ','<='), 7254 (' equal or greater ','>='), 7255 (' not equal to ','!='), 7256 (' not equal ','!='), 7257 (' equal to ','='), 7258 (' equal ','='), 7259 (' equals ','='), 7260 (' less than ','<'), 7261 (' greater than ','>'), 7262 (' starts with ','startswith'), 7263 (' ends with ','endswith'), 7264 (' not in ' , 'notbelongs'), 7265 (' in ' , 'belongs'), 7266 (' is ','=')]: 7267 if a[0]==' ': 7268 text = text.replace(' is'+a,' %s ' % b) 7269 text = text.replace(a,' %s ' % b) 7270 text = re.sub('\s+',' ',text).lower() 7271 text = re.sub('(?P<a>[\<\>\!\=])\s+(?P<b>[\<\>\!\=])','\g<a>\g<b>',text) 7272 query = field = neg = op = logic = None 7273 for item in text.split(): 7274 if field is None: 7275 if item == 'not': 7276 neg = True 7277 elif not neg and not logic and item in ('and','or'): 7278 logic = item 7279 elif item in field_map: 7280 field = field_map[item] 7281 else: 7282 raise RuntimeError("Invalid syntax") 7283 elif not field is None and op is None: 7284 op = item 7285 elif not op is None: 7286 if item.startswith('#'): 7287 if not item[1:] in constants: 7288 raise RuntimeError("Invalid syntax") 7289 value = constants[item[1:]] 7290 else: 7291 value = item 7292 if field.type in ('text', 'string', 'json'): 7293 if op == '=': op = 'like' 7294 if op == '=': new_query = field==value 7295 elif op == '<': new_query = field<value 7296 elif op == '>': new_query = field>value 7297 elif op == '<=': new_query = field<=value 7298 elif op == '>=': new_query = field>=value 7299 elif op == '!=': new_query = field!=value 7300 elif op == 'belongs': new_query = field.belongs(value.split(',')) 7301 elif op == 'notbelongs': new_query = ~field.belongs(value.split(',')) 7302 elif field.type in ('text', 'string', 'json'): 7303 if op == 'contains': new_query = field.contains(value) 7304 elif op == 'like': new_query = field.like(value) 7305 elif op == 'startswith': new_query = field.startswith(value) 7306 elif op == 'endswith': new_query = field.endswith(value) 7307 else: raise RuntimeError("Invalid operation") 7308 elif field._db._adapter.dbengine=='google:datastore' and \ 7309 field.type in ('list:integer', 'list:string', 'list:reference'): 7310 if op == 'contains': new_query = field.contains(value) 7311 else: raise RuntimeError("Invalid 
operation") 7312 else: raise RuntimeError("Invalid operation") 7313 if neg: new_query = ~new_query 7314 if query is None: 7315 query = new_query 7316 elif logic == 'and': 7317 query &= new_query 7318 elif logic == 'or': 7319 query |= new_query 7320 field = op = neg = logic = None 7321 return query
7322
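smart_query compiles a restricted natural-language string into a Query; a sketch against a person table like the one in the module docstring:

>>> q = smart_query([db.person], 'person.name starts with "J" and person.id > 0')
### equivalent to (db.person.name.startswith('J')) & (db.person.id > 0)
>>> rows = db(q).select()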
7323 -class DAL(object):
7324 7325 """ 7326 an instance of this class represents a database connection 7327 7328 Example:: 7329 7330 db = DAL('sqlite://test.db') 7331 7332 or 7333 7334 db = DAL(**{"uri": ..., "tables": [...]...}) # experimental 7335 7336 db.define_table('tablename', Field('fieldname1'), 7337 Field('fieldname2')) 7338 """ 7339
7340 - def __new__(cls, uri='sqlite://dummy.db', *args, **kwargs):
7341 if not hasattr(THREAD_LOCAL,'db_instances'): 7342 THREAD_LOCAL.db_instances = {} 7343 if not hasattr(THREAD_LOCAL,'db_instances_zombie'): 7344 THREAD_LOCAL.db_instances_zombie = {} 7345 if uri == '<zombie>': 7346 db_uid = kwargs['db_uid'] # a zombie must have a db_uid! 7347 if db_uid in THREAD_LOCAL.db_instances: 7348 db_group = THREAD_LOCAL.db_instances[db_uid] 7349 db = db_group[-1] 7350 elif db_uid in THREAD_LOCAL.db_instances_zombie: 7351 db = THREAD_LOCAL.db_instances_zombie[db_uid] 7352 else: 7353 db = super(DAL, cls).__new__(cls) 7354 THREAD_LOCAL.db_instances_zombie[db_uid] = db 7355 else: 7356 db_uid = kwargs.get('db_uid',hashlib_md5(repr(uri)).hexdigest()) 7357 if db_uid in THREAD_LOCAL.db_instances_zombie: 7358 db = THREAD_LOCAL.db_instances_zombie[db_uid] 7359 del THREAD_LOCAL.db_instances_zombie[db_uid] 7360 else: 7361 db = super(DAL, cls).__new__(cls) 7362 db_group = THREAD_LOCAL.db_instances.get(db_uid,[]) 7363 db_group.append(db) 7364 THREAD_LOCAL.db_instances[db_uid] = db_group 7365 db._db_uid = db_uid 7366 return db
7367 7368 @staticmethod
7369 - def set_folder(folder):
7370 """ 7371 # ## this allows gluon to set a folder for this thread 7372 # ## <<<<<<<<< Should go away as new DAL replaces old sql.py 7373 """ 7374 BaseAdapter.set_folder(folder)
7375 7376 @staticmethod
7377 - def get_instances():
7378 """ 7379 Returns a dictionary with uri as key with timings and defined tables 7380 {'sqlite://storage.sqlite': { 7381 'dbstats': [(select auth_user.email from auth_user, 0.02009)], 7382 'dbtables': { 7383 'defined': ['auth_cas', 'auth_event', 'auth_group', 7384 'auth_membership', 'auth_permission', 'auth_user'], 7385 'lazy': '[]' 7386 } 7387 } 7388 } 7389 """ 7390 dbs = getattr(THREAD_LOCAL,'db_instances',{}).items() 7391 infos = {} 7392 for db_uid, db_group in dbs: 7393 for db in db_group: 7394 if not db._uri: 7395 continue 7396 k = hide_password(db._uri) 7397 infos[k] = dict(dbstats = [(row[0], row[1]) for row in db._timings], 7398 dbtables = {'defined': 7399 sorted(list(set(db.tables) - 7400 set(db._LAZY_TABLES.keys()))), 7401 'lazy': sorted(db._LAZY_TABLES.keys())} 7402 ) 7403 return infos
7404 7405 @staticmethod
7406 - def distributed_transaction_begin(*instances):
7407 if not instances: 7408 return 7409 thread_key = '%s.%s' % (socket.gethostname(), threading.currentThread()) 7410 instances = list(enumerate(instances)) 7411 keys = ['%s.%i' % (thread_key, i) for (i, db) in instances] 7412 for (i, db) in instances: 7413 if not db._adapter.support_distributed_transaction(): 7414 raise SyntaxError( 7415 'distributed transaction not supported by %s' % db._dbname) 7416 for (i, db) in instances: 7417 db._adapter.distributed_transaction_begin(keys[i])
7418 7419 @staticmethod
7420 - def distributed_transaction_commit(*instances):
7421 if not instances: 7422 return 7423 instances = list(enumerate(instances)) 7424 thread_key = '%s.%s' % (socket.gethostname(), threading.currentThread()) 7425 keys = ['%s.%i' % (thread_key, i) for (i, db) in instances] 7426 for (i, db) in instances: 7427 if not db._adapter.support_distributed_transaction(): 7428 raise SyntaxError( 7429 'distributed transaction not supported by %s' % db._dbname) 7430 try: 7431 for (i, db) in instances: 7432 db._adapter.prepare(keys[i]) 7433 except: 7434 for (i, db) in instances: 7435 db._adapter.rollback_prepared(keys[i]) 7436 raise RuntimeError('failure to commit distributed transaction') 7437 else: 7438 for (i, db) in instances: 7439 db._adapter.commit_prepared(keys[i]) 7440 return
7441
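A two-phase-commit sketch across two connections; the URIs and the account table are placeholders, and both backends must support distributed transactions (e.g. PostgreSQL):

>>> db1 = DAL('postgres://user:password@host1/bank')
>>> db2 = DAL('postgres://user:password@host2/bank')
>>> db1.account.insert(balance=-100)   ### hypothetical table defined on each side
>>> db2.account.insert(balance=+100)
>>> DAL.distributed_transaction_commit(db1, db2)  ### prepares both, then commits both, or rolls both back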
7442 - def __init__(self, uri=DEFAULT_URI, 7443 pool_size=0, folder=None, 7444 db_codec='UTF-8', check_reserved=None, 7445 migrate=True, fake_migrate=False, 7446 migrate_enabled=True, fake_migrate_all=False, 7447 decode_credentials=False, driver_args=None, 7448 adapter_args=None, attempts=5, auto_import=False, 7449 bigint_id=False, debug=False, lazy_tables=False, 7450 db_uid=None, do_connect=True, 7451 after_connection=None, tables=None):
7452 """ 7453 Creates a new Database Abstraction Layer instance. 7454 7455 Keyword arguments: 7456 7457 :uri: string that contains information for connecting to a database. 7458 (default: 'sqlite://dummy.db') 7459 7460 experimental: you can specify a dictionary as uri 7461 parameter i.e. with 7462 db = DAL({"uri": "sqlite://storage.sqlite", 7463 "tables": {...}, ...}) 7464 7465 for an example of dict input you can check the output 7466 of the scaffolding db model with 7467 7468 db.as_dict() 7469 7470 Note that for compatibility with Python older than 7471 version 2.6.5 you should cast your dict input keys 7472 to str due to a syntax limitation on kwarg names. 7473 for proper DAL dictionary input you can use one of: 7474 7475 obj = serializers.cast_keys(dict, [encoding="utf-8"]) 7476 7477 or else (for parsing json input) 7478 7479 obj = serializers.loads_json(data, unicode_keys=False) 7480 7481 :pool_size: How many open connections to make to the database object. 7482 :folder: where .table files will be created. 7483 automatically set within web2py 7484 use an explicit path when using DAL outside web2py 7485 :db_codec: string encoding of the database (default: 'UTF-8') 7486 :check_reserved: list of adapters to check tablenames and column names 7487 against sql/nosql reserved keywords. (Default None) 7488 7489 * 'common' List of sql keywords that are common to all database types 7490 such as "SELECT, INSERT". (recommended) 7491 * 'all' Checks against all known SQL keywords. (not recommended) 7492 <adaptername> Checks against the specific adapters list of keywords 7493 (recommended) 7494 * '<adaptername>_nonreserved' Checks against the specific adapters 7495 list of nonreserved keywords. (if available) 7496 :migrate (defaults to True) sets default migrate behavior for all tables 7497 :fake_migrate (defaults to False) sets default fake_migrate behavior for all tables 7498 :migrate_enabled (defaults to True). If set to False disables ALL migrations 7499 :fake_migrate_all (defaults to False). If sets to True fake migrates ALL tables 7500 :attempts (defaults to 5). Number of times to attempt connecting 7501 :auto_import (defaults to False). 
If set, import automatically table definitions from the 7502 databases folder 7503 :bigint_id (defaults to False): If set, turn on bigint instead of int for id fields 7504 :lazy_tables (defaults to False): delay table definition until table access 7505 :after_connection (defaults to None): a callable that will be execute after the connection 7506 """ 7507 if uri == '<zombie>' and db_uid is not None: return 7508 if not decode_credentials: 7509 credential_decoder = lambda cred: cred 7510 else: 7511 credential_decoder = lambda cred: urllib.unquote(cred) 7512 self._folder = folder 7513 if folder: 7514 self.set_folder(folder) 7515 self._uri = uri 7516 self._pool_size = pool_size 7517 self._db_codec = db_codec 7518 self._lastsql = '' 7519 self._timings = [] 7520 self._pending_references = {} 7521 self._request_tenant = 'request_tenant' 7522 self._common_fields = [] 7523 self._referee_name = '%(table)s' 7524 self._bigint_id = bigint_id 7525 self._debug = debug 7526 self._migrated = [] 7527 self._LAZY_TABLES = {} 7528 self._lazy_tables = lazy_tables 7529 self._tables = SQLCallableList() 7530 self._driver_args = driver_args 7531 self._adapter_args = adapter_args 7532 self._check_reserved = check_reserved 7533 self._decode_credentials = decode_credentials 7534 self._attempts = attempts 7535 self._do_connect = do_connect 7536 7537 if not str(attempts).isdigit() or attempts < 0: 7538 attempts = 5 7539 if uri: 7540 uris = isinstance(uri,(list,tuple)) and uri or [uri] 7541 error = '' 7542 connected = False 7543 for k in range(attempts): 7544 for uri in uris: 7545 try: 7546 if is_jdbc and not uri.startswith('jdbc:'): 7547 uri = 'jdbc:'+uri 7548 self._dbname = REGEX_DBNAME.match(uri).group() 7549 if not self._dbname in ADAPTERS: 7550 raise SyntaxError("Error in URI '%s' or database not supported" % self._dbname) 7551 # notice that driver args or {} else driver_args 7552 # defaults to {} global, not correct 7553 kwargs = dict(db=self,uri=uri, 7554 pool_size=pool_size, 7555 folder=folder, 7556 db_codec=db_codec, 7557 credential_decoder=credential_decoder, 7558 driver_args=driver_args or {}, 7559 adapter_args=adapter_args or {}, 7560 do_connect=do_connect, 7561 after_connection=after_connection) 7562 self._adapter = ADAPTERS[self._dbname](**kwargs) 7563 types = ADAPTERS[self._dbname].types 7564 # copy so multiple DAL() possible 7565 self._adapter.types = copy.copy(types) 7566 self._adapter.build_parsemap() 7567 if bigint_id: 7568 if 'big-id' in types and 'reference' in types: 7569 self._adapter.types['id'] = types['big-id'] 7570 self._adapter.types['reference'] = types['big-reference'] 7571 connected = True 7572 break 7573 except SyntaxError: 7574 raise 7575 except Exception: 7576 tb = traceback.format_exc() 7577 sys.stderr.write('DEBUG: connect attempt %i, connection error:\n%s' % (k, tb)) 7578 if connected: 7579 break 7580 else: 7581 time.sleep(1) 7582 if not connected: 7583 raise RuntimeError("Failure to connect, tried %d times:\n%s" % (attempts, tb)) 7584 else: 7585 self._adapter = BaseAdapter(db=self,pool_size=0, 7586 uri='None',folder=folder, 7587 db_codec=db_codec, after_connection=after_connection) 7588 migrate = fake_migrate = False 7589 adapter = self._adapter 7590 self._uri_hash = hashlib_md5(adapter.uri).hexdigest() 7591 self.check_reserved = check_reserved 7592 if self.check_reserved: 7593 from reserved_sql_keywords import ADAPTERS as RSK 7594 self.RSK = RSK 7595 self._migrate = migrate 7596 self._fake_migrate = fake_migrate 7597 self._migrate_enabled = migrate_enabled 7598 
self._fake_migrate_all = fake_migrate_all 7599 if auto_import or tables: 7600 self.import_table_definitions(adapter.folder, 7601 tables=tables)
7602 7603 @property
7604 - def tables(self):
7605 return self._tables
7606
7607 - def import_table_definitions(self, path, migrate=False, 7608 fake_migrate=False, tables=None):
7609 pattern = pjoin(path,self._uri_hash+'_*.table') 7610 if tables: 7611 for table in tables: 7612 self.define_table(**table) 7613 else: 7614 for filename in glob.glob(pattern): 7615 tfile = self._adapter.file_open(filename, 'r') 7616 try: 7617 sql_fields = pickle.load(tfile) 7618 name = filename[len(pattern)-7:-6] 7619 mf = [(value['sortable'], 7620 Field(key, 7621 type=value['type'], 7622 length=value.get('length',None), 7623 notnull=value.get('notnull',False), 7624 unique=value.get('unique',False))) \ 7625 for key, value in sql_fields.iteritems()] 7626 mf.sort(lambda a,b: cmp(a[0],b[0])) 7627 self.define_table(name,*[item[1] for item in mf], 7628 **dict(migrate=migrate, 7629 fake_migrate=fake_migrate)) 7630 finally: 7631 self._adapter.file_close(tfile)
7632
7633 - def check_reserved_keyword(self, name):
7634 """ 7635 Validates ``name`` against SQL keywords 7636 Uses self.check_reserve which is a list of 7637 operators to use. 7638 self.check_reserved 7639 ['common', 'postgres', 'mysql'] 7640 self.check_reserved 7641 ['all'] 7642 """ 7643 for backend in self.check_reserved: 7644 if name.upper() in self.RSK[backend]: 7645 raise SyntaxError( 7646 'invalid table/column name "%s" is a "%s" reserved SQL/NOSQL keyword' % (name, backend.upper()))
7647
7648 - def parse_as_rest(self,patterns,args,vars,queries=None,nested_select=True):
7649 """ 7650 EXAMPLE: 7651 7652 db.define_table('person',Field('name'),Field('info')) 7653 db.define_table('pet',Field('ownedby',db.person),Field('name'),Field('info')) 7654 7655 @request.restful() 7656 def index(): 7657 def GET(*args,**vars): 7658 patterns = [ 7659 "/friends[person]", 7660 "/{person.name}/:field", 7661 "/{person.name}/pets[pet.ownedby]", 7662 "/{person.name}/pets[pet.ownedby]/{pet.name}", 7663 "/{person.name}/pets[pet.ownedby]/{pet.name}/:field", 7664 ("/dogs[pet]", db.pet.info=='dog'), 7665 ("/dogs[pet]/{pet.name.startswith}", db.pet.info=='dog'), 7666 ] 7667 parser = db.parse_as_rest(patterns,args,vars) 7668 if parser.status == 200: 7669 return dict(content=parser.response) 7670 else: 7671 raise HTTP(parser.status,parser.error) 7672 7673 def POST(table_name,**vars): 7674 if table_name == 'person': 7675 return db.person.validate_and_insert(**vars) 7676 elif table_name == 'pet': 7677 return db.pet.validate_and_insert(**vars) 7678 else: 7679 raise HTTP(400) 7680 return locals() 7681 """ 7682 7683 db = self 7684 re1 = REGEX_SEARCH_PATTERN 7685 re2 = REGEX_SQUARE_BRACKETS 7686 7687 def auto_table(table,base='',depth=0): 7688 patterns = [] 7689 for field in db[table].fields: 7690 if base: 7691 tag = '%s/%s' % (base,field.replace('_','-')) 7692 else: 7693 tag = '/%s/%s' % (table.replace('_','-'),field.replace('_','-')) 7694 f = db[table][field] 7695 if not f.readable: continue 7696 if f.type=='id' or 'slug' in field or f.type.startswith('reference'): 7697 tag += '/{%s.%s}' % (table,field) 7698 patterns.append(tag) 7699 patterns.append(tag+'/:field') 7700 elif f.type.startswith('boolean'): 7701 tag += '/{%s.%s}' % (table,field) 7702 patterns.append(tag) 7703 patterns.append(tag+'/:field') 7704 elif f.type in ('float','double','integer','bigint'): 7705 tag += '/{%s.%s.ge}/{%s.%s.lt}' % (table,field,table,field) 7706 patterns.append(tag) 7707 patterns.append(tag+'/:field') 7708 elif f.type.startswith('list:'): 7709 tag += '/{%s.%s.contains}' % (table,field) 7710 patterns.append(tag) 7711 patterns.append(tag+'/:field') 7712 elif f.type in ('date','datetime'): 7713 tag+= '/{%s.%s.year}' % (table,field) 7714 patterns.append(tag) 7715 patterns.append(tag+'/:field') 7716 tag+='/{%s.%s.month}' % (table,field) 7717 patterns.append(tag) 7718 patterns.append(tag+'/:field') 7719 tag+='/{%s.%s.day}' % (table,field) 7720 patterns.append(tag) 7721 patterns.append(tag+'/:field') 7722 if f.type in ('datetime','time'): 7723 tag+= '/{%s.%s.hour}' % (table,field) 7724 patterns.append(tag) 7725 patterns.append(tag+'/:field') 7726 tag+='/{%s.%s.minute}' % (table,field) 7727 patterns.append(tag) 7728 patterns.append(tag+'/:field') 7729 tag+='/{%s.%s.second}' % (table,field) 7730 patterns.append(tag) 7731 patterns.append(tag+'/:field') 7732 if depth>0: 7733 for f in db[table]._referenced_by: 7734 tag+='/%s[%s.%s]' % (table,f.tablename,f.name) 7735 patterns.append(tag) 7736 patterns += auto_table(table,base=tag,depth=depth-1) 7737 return patterns
7738 7739 if patterns == 'auto': 7740 patterns=[] 7741 for table in db.tables: 7742 if not table.startswith('auth_'): 7743 patterns.append('/%s[%s]' % (table,table)) 7744 patterns += auto_table(table,base='',depth=1) 7745 else: 7746 i = 0 7747 while i<len(patterns): 7748 pattern = patterns[i] 7749 if not isinstance(pattern,str): 7750 pattern = pattern[0] 7751 tokens = pattern.split('/') 7752 if tokens[-1].startswith(':auto') and re2.match(tokens[-1]): 7753 new_patterns = auto_table(tokens[-1][tokens[-1].find('[')+1:-1], 7754 '/'.join(tokens[:-1])) 7755 patterns = patterns[:i]+new_patterns+patterns[i+1:] 7756 i += len(new_patterns) 7757 else: 7758 i += 1 7759 if '/'.join(args) == 'patterns': 7760 return Row({'status':200,'pattern':'list', 7761 'error':None,'response':patterns}) 7762 for pattern in patterns: 7763 basequery, exposedfields = None, [] 7764 if isinstance(pattern,tuple): 7765 if len(pattern)==2: 7766 pattern, basequery = pattern 7767 elif len(pattern)>2: 7768 pattern, basequery, exposedfields = pattern[0:3] 7769 otable=table=None 7770 if not isinstance(queries,dict): 7771 dbset=db(queries) 7772 if basequery is not None: 7773 dbset = dbset(basequery) 7774 i=0 7775 tags = pattern[1:].split('/') 7776 if len(tags)!=len(args): 7777 continue 7778 for tag in tags: 7779 if re1.match(tag): 7780 # print 're1:'+tag 7781 tokens = tag[1:-1].split('.') 7782 table, field = tokens[0], tokens[1] 7783 if not otable or table == otable: 7784 if len(tokens)==2 or tokens[2]=='eq': 7785 query = db[table][field]==args[i] 7786 elif tokens[2]=='ne': 7787 query = db[table][field]!=args[i] 7788 elif tokens[2]=='lt': 7789 query = db[table][field]<args[i] 7790 elif tokens[2]=='gt': 7791 query = db[table][field]>args[i] 7792 elif tokens[2]=='ge': 7793 query = db[table][field]>=args[i] 7794 elif tokens[2]=='le': 7795 query = db[table][field]<=args[i] 7796 elif tokens[2]=='year': 7797 query = db[table][field].year()==args[i] 7798 elif tokens[2]=='month': 7799 query = db[table][field].month()==args[i] 7800 elif tokens[2]=='day': 7801 query = db[table][field].day()==args[i] 7802 elif tokens[2]=='hour': 7803 query = db[table][field].hour()==args[i] 7804 elif tokens[2]=='minute': 7805 query = db[table][field].minutes()==args[i] 7806 elif tokens[2]=='second': 7807 query = db[table][field].seconds()==args[i] 7808 elif tokens[2]=='startswith': 7809 query = db[table][field].startswith(args[i]) 7810 elif tokens[2]=='contains': 7811 query = db[table][field].contains(args[i]) 7812 else: 7813 raise RuntimeError("invalid pattern: %s" % pattern) 7814 if len(tokens)==4 and tokens[3]=='not': 7815 query = ~query 7816 elif len(tokens)>=4: 7817 raise RuntimeError("invalid pattern: %s" % pattern) 7818 if not otable and isinstance(queries,dict): 7819 dbset = db(queries[table]) 7820 if basequery is not None: 7821 dbset = dbset(basequery) 7822 dbset=dbset(query) 7823 else: 7824 raise RuntimeError("missing relation in pattern: %s" % pattern) 7825 elif re2.match(tag) and args[i]==tag[:tag.find('[')]: 7826 ref = tag[tag.find('[')+1:-1] 7827 if '.' 
in ref and otable: 7828 table,field = ref.split('.') 7829 selfld = '_id' 7830 if db[table][field].type.startswith('reference '): 7831 refs = [ x.name for x in db[otable] if x.type == db[table][field].type ] 7832 else: 7833 refs = [ x.name for x in db[table]._referenced_by if x.tablename==otable ] 7834 if refs: 7835 selfld = refs[0] 7836 if nested_select: 7837 try: 7838 dbset=db(db[table][field].belongs(dbset._select(db[otable][selfld]))) 7839 except ValueError: 7840 return Row({'status':400,'pattern':pattern, 7841 'error':'invalid path','response':None}) 7842 else: 7843 items = [item.id for item in dbset.select(db[otable][selfld])] 7844 dbset=db(db[table][field].belongs(items)) 7845 else: 7846 table = ref 7847 if not otable and isinstance(queries,dict): 7848 dbset = db(queries[table]) 7849 dbset=dbset(db[table]) 7850 elif tag==':field' and table: 7851 # print 're3:'+tag 7852 field = args[i] 7853 if not field in db[table]: break 7854 # hand-built patterns should respect .readable=False as well 7855 if not db[table][field].readable: 7856 return Row({'status':418,'pattern':pattern, 7857 'error':'I\'m a teapot','response':None}) 7858 try: 7859 distinct = vars.get('distinct', False) == 'True' 7860 offset = long(vars.get('offset',None) or 0) 7861 limits = (offset,long(vars.get('limit',None) or 1000)+offset) 7862 except ValueError: 7863 return Row({'status':400,'error':'invalid limits','response':None}) 7864 items = dbset.select(db[table][field], distinct=distinct, limitby=limits) 7865 if items: 7866 return Row({'status':200,'response':items, 7867 'pattern':pattern}) 7868 else: 7869 return Row({'status':404,'pattern':pattern, 7870 'error':'no record found','response':None}) 7871 elif tag != args[i]: 7872 break 7873 otable = table 7874 i += 1 7875 if i==len(tags) and table: 7876 ofields = vars.get('order',db[table]._id.name).split('|') 7877 try: 7878 orderby = [db[table][f] if not f.startswith('~') else ~db[table][f[1:]] for f in ofields] 7879 except (KeyError, AttributeError): 7880 return Row({'status':400,'error':'invalid orderby','response':None}) 7881 if exposedfields: 7882 fields = [field for field in db[table] if str(field).split('.')[-1] in exposedfields and field.readable] 7883 else: 7884 fields = [field for field in db[table] if field.readable] 7885 count = dbset.count() 7886 try: 7887 offset = long(vars.get('offset',None) or 0) 7888 limits = (offset,long(vars.get('limit',None) or 1000)+offset) 7889 except ValueError: 7890 return Row({'status':400,'error':'invalid limits','response':None}) 7891 if count > limits[1]-limits[0]: 7892 return Row({'status':400,'error':'too many records','response':None}) 7893 try: 7894 response = dbset.select(limitby=limits,orderby=orderby,*fields) 7895 except ValueError: 7896 return Row({'status':400,'pattern':pattern, 7897 'error':'invalid path','response':None}) 7898 return Row({'status':200,'response':response, 7899 'pattern':pattern,'count':count}) 7900 return Row({'status':400,'error':'no matching pattern','response':None})
7901
7902 - def define_table( 7903 self, 7904 tablename, 7905 *fields, 7906 **args 7907 ):
7908 if not fields and 'fields' in args: 7909 fields = args.get('fields',()) 7910 if not isinstance(tablename, str): 7911 if isinstance(tablename, unicode): 7912 try: 7913 tablename = str(tablename) 7914 except UnicodeEncodeError: 7915 raise SyntaxError("invalid unicode table name") 7916 else: 7917 raise SyntaxError("missing table name") 7918 elif hasattr(self,tablename) or tablename in self.tables: 7919 if not args.get('redefine',False): 7920 raise SyntaxError('table already defined: %s' % tablename) 7921 elif tablename.startswith('_') or hasattr(self,tablename) or \ 7922 REGEX_PYTHON_KEYWORDS.match(tablename): 7923 raise SyntaxError('invalid table name: %s' % tablename) 7924 elif self.check_reserved: 7925 self.check_reserved_keyword(tablename) 7926 else: 7927 invalid_args = set(args)-TABLE_ARGS 7928 if invalid_args: 7929 raise SyntaxError('invalid table "%s" attributes: %s' \ 7930 % (tablename,invalid_args)) 7931 if self._lazy_tables and not tablename in self._LAZY_TABLES: 7932 self._LAZY_TABLES[tablename] = (tablename,fields,args) 7933 table = None 7934 else: 7935 table = self.lazy_define_table(tablename,*fields,**args) 7936 if not tablename in self.tables: 7937 self.tables.append(tablename) 7938 return table
7939
7940 - def lazy_define_table( 7941 self, 7942 tablename, 7943 *fields, 7944 **args 7945 ):
7946 args_get = args.get 7947 common_fields = self._common_fields 7948 if common_fields: 7949 fields = list(fields) + list(common_fields) 7950 7951 table_class = args_get('table_class',Table) 7952 table = table_class(self, tablename, *fields, **args) 7953 table._actual = True 7954 self[tablename] = table 7955 # must follow above line to handle self references 7956 table._create_references() 7957 for field in table: 7958 if field.requires == DEFAULT: 7959 field.requires = sqlhtml_validators(field) 7960 7961 migrate = self._migrate_enabled and args_get('migrate',self._migrate) 7962 if migrate and not self._uri in (None,'None') \ 7963 or self._adapter.dbengine=='google:datastore': 7964 fake_migrate = self._fake_migrate_all or \ 7965 args_get('fake_migrate',self._fake_migrate) 7966 polymodel = args_get('polymodel',None) 7967 try: 7968 GLOBAL_LOCKER.acquire() 7969 self._lastsql = self._adapter.create_table( 7970 table,migrate=migrate, 7971 fake_migrate=fake_migrate, 7972 polymodel=polymodel) 7973 finally: 7974 GLOBAL_LOCKER.release() 7975 else: 7976 table._dbt = None 7977 on_define = args_get('on_define',None) 7978 if on_define: on_define(table) 7979 return table
7980
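When lazy_tables is enabled, define_table merely records the definition and lazy_define_table runs on first access (via __getattr__ further down); a sketch:

>>> db = DAL('sqlite://storage.sqlite', lazy_tables=True)
>>> db.define_table('thing', Field('name'))   ### queued in db._LAZY_TABLES; returns None
>>> t = db.thing   ### first access builds the Table and runs the migration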
7981 - def as_dict(self, flat=False, sanitize=True):
7982 db_uid = uri = None 7983 if not sanitize: 7984 uri, db_uid = (self._uri, self._db_uid) 7985 db_as_dict = dict(tables=[], uri=uri, db_uid=db_uid, 7986 **dict([(k, getattr(self, "_" + k, None)) 7987 for k in 'pool_size','folder','db_codec', 7988 'check_reserved','migrate','fake_migrate', 7989 'migrate_enabled','fake_migrate_all', 7990 'decode_credentials','driver_args', 7991 'adapter_args', 'attempts', 7992 'bigint_id','debug','lazy_tables', 7993 'do_connect'])) 7994 for table in self: 7995 db_as_dict["tables"].append(table.as_dict(flat=flat, 7996 sanitize=sanitize)) 7997 return db_as_dict
7998
7999 - def as_xml(self, sanitize=True):
8000 if not have_serializers: 8001 raise ImportError("No xml serializers available") 8002 d = self.as_dict(flat=True, sanitize=sanitize) 8003 return serializers.xml(d)
8004
8005 - def as_json(self, sanitize=True):
8006 if not have_serializers: 8007 raise ImportError("No json serializers available") 8008 d = self.as_dict(flat=True, sanitize=sanitize) 8009 return serializers.json(d)
8010
8011 - def as_yaml(self, sanitize=True):
8012 if not have_serializers: 8013 raise ImportError("No YAML serializers available") 8014 d = self.as_dict(flat=True, sanitize=sanitize) 8015 return serializers.yaml(d)
8016
8017 - def __contains__(self, tablename):
8018 try: 8019 return tablename in self.tables 8020 except AttributeError: 8021 # The instance has no .tables attribute yet 8022 return False
8023 8024 has_key = __contains__ 8025
8026 - def get(self,key,default=None):
8027 return self.__dict__.get(key,default)
8028
8029 - def __iter__(self):
8030 for tablename in self.tables: 8031 yield self[tablename]
8032
8033 - def __getitem__(self, key):
8034 return self.__getattr__(str(key))
8035
8036 - def __getattr__(self, key):
8037 if ogetattr(self,'_lazy_tables') and \ 8038 key in ogetattr(self,'_LAZY_TABLES'): 8039 tablename, fields, args = self._LAZY_TABLES.pop(key) 8040 return self.lazy_define_table(tablename,*fields,**args) 8041 return ogetattr(self, key)
8042
8043 - def __setitem__(self, key, value):
8044 osetattr(self, str(key), value)
8045
8046 - def __setattr__(self, key, value):
8047 if key[:1]!='_' and key in self: 8048 raise SyntaxError( 8049 'Object %s exists and cannot be redefined' % key) 8050 osetattr(self,key,value)
8051 8052 __delitem__ = object.__delattr__ 8053
8054 - def __repr__(self):
8055 if hasattr(self,'_uri'): 8056 return '<DAL uri="%s">' % hide_password(str(self._uri)) 8057 else: 8058 return '<DAL db_uid="%s">' % self._db_uid
8059
8060 - def smart_query(self,fields,text):
8061 return Set(self, smart_query(fields,text))
8062
8063 - def __call__(self, query=None, ignore_common_filters=None):
8064 if isinstance(query,Table): 8065 query = self._adapter.id_query(query) 8066 elif isinstance(query,Field): 8067 query = query!=None 8068 elif isinstance(query, dict): 8069 icf = query.get("ignore_common_filters") 8070 if icf: ignore_common_filters = icf 8071 return Set(self, query, ignore_common_filters=ignore_common_filters)
8072
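Consequently all of the following build a Set; a Table argument is shorthand for "all records" and a Field argument for "field is not None" (sketched against the person table):

>>> s1 = db(db.person.name == 'Jim')   ### explicit query
>>> s2 = db(db.person)                 ### expands to an id-based all-records query
>>> s3 = db(db.person.name)            ### expands to db.person.name != None
>>> rows = s2.select()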
8073 - def commit(self):
8074 self._adapter.commit()
8075
8076 - def rollback(self):
8077 self._adapter.rollback()
8078
8079 - def close(self):
8080 self._adapter.close() 8081 if self._db_uid in THREAD_LOCAL.db_instances: 8082 db_group = THREAD_LOCAL.db_instances[self._db_uid] 8083 db_group.remove(self) 8084 if not db_group: 8085 del THREAD_LOCAL.db_instances[self._db_uid]
8086
8087 - def executesql(self, query, placeholders=None, as_dict=False, 8088 fields=None, colnames=None):
8089 """ 8090 placeholders is optional and will always be None. 8091 If using raw SQL with placeholders, placeholders may be 8092 a sequence of values to be substituted in 8093 or, (if supported by the DB driver), a dictionary with keys 8094 matching named placeholders in your SQL. 8095 8096 Added 2009-12-05 "as_dict" optional argument. Will always be 8097 None when using DAL. If using raw SQL can be set to True 8098 and the results cursor returned by the DB driver will be 8099 converted to a sequence of dictionaries keyed with the db 8100 field names. Tested with SQLite but should work with any database 8101 since the cursor.description used to get field names is part of the 8102 Python dbi 2.0 specs. Results returned with as_dict=True are 8103 the same as those returned when applying .to_list() to a DAL query. 8104 8105 [{field1: value1, field2: value2}, {field1: value1b, field2: value2b}] 8106 8107 Added 2012-08-24 "fields" and "colnames" optional arguments. If either 8108 is provided, the results cursor returned by the DB driver will be 8109 converted to a DAL Rows object using the db._adapter.parse() method. 8110 8111 The "fields" argument is a list of DAL Field objects that match the 8112 fields returned from the DB. The Field objects should be part of one or 8113 more Table objects defined on the DAL object. The "fields" list can 8114 include one or more DAL Table objects in addition to or instead of 8115 including Field objects, or it can be just a single table (not in a 8116 list). In that case, the Field objects will be extracted from the 8117 table(s). 8118 8119 Instead of specifying the "fields" argument, the "colnames" argument 8120 can be specified as a list of field names in tablename.fieldname format. 8121 Again, these should represent tables and fields defined on the DAL 8122 object. 8123 8124 It is also possible to specify both "fields" and the associated 8125 "colnames". In that case, "fields" can also include DAL Expression 8126 objects in addition to Field objects. For Field objects in "fields", 8127 the associated "colnames" must still be in tablename.fieldname format. 8128 For Expression objects in "fields", the associated "colnames" can 8129 be any arbitrary labels. 8130 8131 Note, the DAL Table objects referred to by "fields" or "colnames" can 8132 be dummy tables and do not have to represent any real tables in the 8133 database. Also, note that the "fields" and "colnames" must be in the 8134 same order as the fields in the results cursor returned from the DB. 8135 """ 8136 adapter = self._adapter 8137 if placeholders: 8138 adapter.execute(query, placeholders) 8139 else: 8140 adapter.execute(query) 8141 if as_dict: 8142 if not hasattr(adapter.cursor,'description'): 8143 raise RuntimeError("database does not support executesql(...,as_dict=True)") 8144 # Non-DAL legacy db query, converts cursor results to dict. 8145 # sequence of 7-item sequences. each sequence tells about a column. 8146 # first item is always the field name according to Python Database API specs 8147 columns = adapter.cursor.description 8148 # reduce the column info down to just the field names 8149 fields = [f[0] for f in columns] 8150 # will hold our finished resultset in a list 8151 data = adapter._fetchall() 8152 # convert the list for each row into a dictionary so it's 8153 # easier to work with. 
row['field_name'] rather than row[0] 8154 return [dict(zip(fields,row)) for row in data] 8155 try: 8156 data = adapter._fetchall() 8157 except: 8158 return None 8159 if fields or colnames: 8160 fields = [] if fields is None else fields 8161 if not isinstance(fields, list): 8162 fields = [fields] 8163 extracted_fields = [] 8164 for field in fields: 8165 if isinstance(field, Table): 8166 extracted_fields.extend([f for f in field]) 8167 else: 8168 extracted_fields.append(field) 8169 if not colnames: 8170 colnames = ['%s.%s' % (f.tablename, f.name) 8171 for f in extracted_fields] 8172 data = adapter.parse( 8173 data, fields=extracted_fields, colnames=colnames) 8174 return data
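### usage sketch (editorial illustration, not part of dal.py): executesql
### with as_dict=True returns plain dictionaries; with fields=... the raw
### cursor is parsed into a DAL Rows object. Assumes a scratch in-memory
### SQLite DAL:
>>> db = DAL('sqlite:memory')
>>> person = db.define_table('person', Field('name'))
>>> db.person.insert(name='Max')
1
>>> print db.executesql('SELECT name FROM person;', as_dict=True)[0]['name']
Max
>>> print db.executesql('SELECT id, name FROM person;',
...                     fields=db.person).first().name
Max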
8175
8176 - def _remove_references_to(self, thistable):
8177 for table in self: 8178 table._referenced_by = [field for field in table._referenced_by 8179 if not field.table==thistable]
8180
8181 - def export_to_csv_file(self, ofile, *args, **kwargs):
8182 step = long(kwargs.get('max_fetch_rows',500)) 8183 write_colnames = kwargs['write_colnames'] = \ 8184 kwargs.get("write_colnames", True) 8185 for table in self.tables: 8186 ofile.write('TABLE %s\r\n' % table) 8187 query = self._adapter.id_query(self[table]) 8188 nrows = self(query).count() 8189 kwargs['write_colnames'] = write_colnames 8190 for k in range(0,nrows,step): 8191 self(query).select(limitby=(k,k+step)).export_to_csv_file( 8192 ofile, *args, **kwargs) 8193 kwargs['write_colnames'] = False 8194 ofile.write('\r\n\r\n') 8195 ofile.write('END')
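### usage sketch (editorial, not part of dal.py): dump every table of the
### scratch db above into one CSV stream; 'backup.csv' is an assumed path
>>> ofile = open('backup.csv', 'wb')
>>> db.export_to_csv_file(ofile)
>>> ofile.close()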
8196
8197 - def import_from_csv_file(self, ifile, id_map=None, null='<NULL>', 8198 unique='uuid', map_tablenames=None, 8199 ignore_missing_tables=False, 8200 *args, **kwargs):
8201 #if id_map is None: id_map={} 8202 id_offset = {} # only used if id_map is None 8203 map_tablenames = map_tablenames or {} 8204 for line in ifile: 8205 line = line.strip() 8206 if not line: 8207 continue 8208 elif line == 'END': 8209 return 8210 elif not line.startswith('TABLE '): 8211 raise SyntaxError('invalid file format') 8212 else: 8213 tablename = line[6:] 8214 tablename = map_tablenames.get(tablename,tablename) 8215 if tablename is not None and tablename in self.tables: 8216 self[tablename].import_from_csv_file( 8217 ifile, id_map, null, unique, id_offset, 8218 *args, **kwargs) 8219 elif tablename is None or ignore_missing_tables: 8220 # skip all non-empty lines of the missing table 8221 for line in ifile: 8222 if not line.strip(): 8223 break 8224 else: 8225 raise RuntimeError("Unable to import table that does not exist.\nTry db.import_from_csv_file(..., map_tablenames={'table':'othertable'},ignore_missing_tables=True)")
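### usage sketch (editorial, not part of dal.py): reload the dump written
### above; map_tablenames and ignore_missing_tables help when the target
### model no longer matches the dump
>>> db.import_from_csv_file(open('backup.csv', 'rb'))
>>> # e.g. db.import_from_csv_file(open('backup.csv', 'rb'),
>>> #          map_tablenames={'person': 'people'},
>>> #          ignore_missing_tables=True)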
8227
8228 8229 -def DAL_unpickler(db_uid):
8230 return DAL('<zombie>',db_uid=db_uid)
8231
8232 -def DAL_pickler(db):
8233 return DAL_unpickler, (db._db_uid,)
8234 8235 copyreg.pickle(DAL, DAL_pickler, DAL_unpickler)
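### sketch (editorial, not part of dal.py): because of the copyreg
### registration above, pickling a DAL stores only its _db_uid; unpickling
### in the same thread reattaches to the live instance, otherwise it yields
### a disconnected '<zombie>' DAL
>>> import pickle
>>> db2 = pickle.loads(pickle.dumps(db))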
8236 8237 -class SQLALL(object):
8238 """ 8239 Helper class providing a comma-separated string having all the field names 8240 (prefixed by table name and '.') 8241 8242 normally only called from within gluon.sql 8243 """ 8244
8245 - def __init__(self, table):
8246 self._table = table
8247
8248 - def __str__(self):
8249 return ', '.join([str(field) for field in self._table])
8250
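### sketch (editorial, not part of dal.py): Table.ALL is an SQLALL instance
### and its string form expands to the full field list of the scratch
### person table above
>>> print db.person.ALL
person.id, person.name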
8251 # class Reference(int): 8252 -class Reference(long):
8253
8254 - def __allocate(self):
8255 if not self._record: 8256 self._record = self._table[long(self)] 8257 if not self._record: 8258 raise RuntimeError( 8259 "Using a recursive select but encountered a broken reference: %s %d"%(self._table, long(self)))
8260
8261 - def __getattr__(self, key, default=None):
8262 if key == 'id': 8263 return long(self) 8264 if key in self._table: 8265 self.__allocate() 8266 if self._record: 8267 return self._record.get(key,None) # to deal with case self.update_record() 8268 else: 8269 return None
8270
8271 - def get(self, key, default=None):
8272 return self.__getattr__(key, default)
8273
8274 - def __setattr__(self, key, value):
8275 if key.startswith('_'): 8276 long.__setattr__(self, key, value) 8277 return 8278 self.__allocate() 8279 self._record[key] = value
8280
8281 - def __getitem__(self, key):
8282 if key == 'id': 8283 return long(self) 8284 self.__allocate() 8285 return self._record.get(key, None)
8286
8287 - def __setitem__(self,key,value):
8288 self.__allocate() 8289 self._record[key] = value
8290
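### sketch (editorial, not part of dal.py): reference values are parsed into
### Reference instances, which fetch the referenced row lazily on first
### attribute access (a "recursive select"); builds on the scratch db above
>>> dogs = db.define_table('dog', Field('name'),
...                        Field('owner', 'reference person'))
>>> id = dogs.insert(name='Skipper', owner=db.person.insert(name='Ann'))
>>> print dogs[id].owner.name
Ann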
8291 8292 -def Reference_unpickler(data):
8293 return marshal.loads(data)
8294
8295 -def Reference_pickler(data):
8296 try: 8297 marshal_dump = marshal.dumps(long(data)) 8298 except AttributeError: 8299 marshal_dump = 'i%s' % struct.pack('<i', long(data)) 8300 return (Reference_unpickler, (marshal_dump,))
8301 8302 copyreg.pickle(Reference, Reference_pickler, Reference_unpickler)
8303 8304 -class MethodAdder(object):
8305 - def __init__(self,table):
8306 self.table = table
8307 - def __call__(self):
8308 return self.register()
8309 - def __getattr__(self,method_name):
8310 return self.register(method_name)
8311 - def register(self,method_name=None):
8312 def _decorated(f): 8313 instance = self.table 8314 import types 8315 method = types.MethodType(f, instance, instance.__class__) 8316 name = method_name or f.func_name 8317 setattr(instance, name, method) 8318 return f
8319 return _decorated
8320
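### sketch (editorial, not part of dal.py): MethodAdder (exposed as
### table.add_method) attaches extra methods to a table; the decorated
### function receives the table as its first argument
>>> @db.person.add_method.all_names
... def all_names(table):
...     return [r.name for r in table._db(table).select(table.name)]
>>> names = db.person.all_names()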
8321 -class Table(object):
8322 8323 """ 8324 an instance of this class represents a database table 8325 8326 Example:: 8327 8328 db = DAL(...) 8329 db.define_table('users', Field('name')) 8330 db.users.insert(name='me') # print db.users._insert(...) to see SQL 8331 db.users.drop() 8332 """ 8333
8334 - def __init__( 8335 self, 8336 db, 8337 tablename, 8338 *fields, 8339 **args 8340 ):
8341 """ 8342 Initializes the table and performs checking on the provided fields. 8343 8344 Each table will have automatically an 'id'. 8345 8346 If a field is of type Table, the fields (excluding 'id') from that table 8347 will be used instead. 8348 8349 :raises SyntaxError: when a supplied field is of incorrect type. 8350 """ 8351 self._actual = False # set to True by define_table() 8352 self._tablename = tablename 8353 self._ot = args.get('actual_name') 8354 self._sequence_name = args.get('sequence_name') or \ 8355 db and db._adapter.sequence_name(tablename) 8356 self._trigger_name = args.get('trigger_name') or \ 8357 db and db._adapter.trigger_name(tablename) 8358 self._common_filter = args.get('common_filter') 8359 self._format = args.get('format') 8360 self._singular = args.get( 8361 'singular',tablename.replace('_',' ').capitalize()) 8362 self._plural = args.get( 8363 'plural',pluralize(self._singular.lower()).capitalize()) 8364 # horrible but for backard compatibility of appamdin: 8365 if 'primarykey' in args and args['primarykey'] is not None: 8366 self._primarykey = args.get('primarykey') 8367 8368 self._before_insert = [] 8369 self._before_update = [Set.delete_uploaded_files] 8370 self._before_delete = [Set.delete_uploaded_files] 8371 self._after_insert = [] 8372 self._after_update = [] 8373 self._after_delete = [] 8374 8375 self.add_method = MethodAdder(self) 8376 8377 fieldnames,newfields=set(),[] 8378 _primarykey = getattr(self, '_primarykey', None) 8379 if _primarykey is not None: 8380 if not isinstance(_primarykey, list): 8381 raise SyntaxError( 8382 "primarykey must be a list of fields from table '%s'" \ 8383 % tablename) 8384 if len(_primarykey)==1: 8385 self._id = [f for f in fields if isinstance(f,Field) \ 8386 and f.name==_primarykey[0]][0] 8387 elif not [f for f in fields if (isinstance(f,Field) and 8388 f.type=='id') or (isinstance(f, dict) and 8389 f.get("type", None)=="id")]: 8390 field = Field('id', 'id') 8391 newfields.append(field) 8392 fieldnames.add('id') 8393 self._id = field 8394 virtual_fields = [] 8395 def include_new(field): 8396 newfields.append(field) 8397 fieldnames.add(field.name) 8398 if field.type=='id': 8399 self._id = field
8400 for field in fields: 8401 if isinstance(field, (FieldMethod, FieldVirtual)): 8402 virtual_fields.append(field) 8403 elif isinstance(field, Field) and not field.name in fieldnames: 8404 if field.db is not None: 8405 field = copy.copy(field) 8406 include_new(field) 8407 elif isinstance(field, dict) and not field['fieldname'] in fieldnames: 8408 include_new(Field(**field)) 8409 elif isinstance(field, Table): 8410 table = field 8411 for field in table: 8412 if not field.name in fieldnames and not field.type=='id': 8413 t2 = not table._actual and self._tablename 8414 include_new(field.clone(point_self_references_to=t2)) 8415 elif not isinstance(field, (Field, Table)): 8416 raise SyntaxError( 8417 'define_table argument is not a Field or Table: %s' % field) 8418 fields = newfields 8419 self._db = db 8420 tablename = tablename 8421 self._fields = SQLCallableList() 8422 self.virtualfields = [] 8423 fields = list(fields) 8424 8425 if db and db._adapter.uploads_in_blob==True: 8426 uploadfields = [f.name for f in fields if f.type=='blob'] 8427 for field in fields: 8428 fn = field.uploadfield 8429 if isinstance(field, Field) and field.type == 'upload'\ 8430 and fn is True: 8431 fn = field.uploadfield = '%s_blob' % field.name 8432 if isinstance(fn,str) and not fn in uploadfields: 8433 fields.append(Field(fn,'blob',default='', 8434 writable=False,readable=False)) 8435 8436 lower_fieldnames = set() 8437 reserved = dir(Table) + ['fields'] 8438 if (db and db.check_reserved): 8439 check_reserved = db.check_reserved_keyword 8440 else: 8441 def check_reserved(field_name): 8442 if field_name in reserved: 8443 raise SyntaxError("field name %s not allowed" % field_name)
8444 for field in fields: 8445 field_name = field.name 8446 check_reserved(field_name) 8447 fn_lower = field_name.lower() 8448 if fn_lower in lower_fieldnames: 8449 raise SyntaxError("duplicate field %s in table %s" \ 8450 % (field_name, tablename)) 8451 else: 8452 lower_fieldnames.add(fn_lower) 8453 8454 self.fields.append(field_name) 8455 self[field_name] = field 8456 if field.type == 'id': 8457 self['id'] = field 8458 field.tablename = field._tablename = tablename 8459 field.table = field._table = self 8460 field.db = field._db = db 8461 self.ALL = SQLALL(self) 8462 8463 if _primarykey is not None: 8464 for k in _primarykey: 8465 if k not in self.fields: 8466 raise SyntaxError( 8467 "primarykey must be a list of fields from table '%s'" % tablename) 8468 else: 8469 self[k].notnull = True 8470 for field in virtual_fields: 8471 self[field.name] = field 8472 8473 @property
8474 - def fields(self):
8475 return self._fields
8476
8477 - def update(self,*args,**kwargs):
8478 raise RuntimeError("Syntax Not Supported")
8479
8480 - def _enable_record_versioning(self, 8481 archive_db=None, 8482 archive_name = '%(tablename)s_archive', 8483 is_active = 'is_active', 8484 current_record = 'current_record', 8485 current_record_label = None):
8486 db = self._db 8487 archive_db = archive_db or db 8488 archive_name = archive_name % dict(tablename=self._tablename) 8489 if archive_name in archive_db.tables(): 8490 return # do not try to define the archive if it already exists 8491 fieldnames = self.fields() 8492 same_db = archive_db is db 8493 field_type = self if same_db else 'bigint' 8494 clones = [] 8495 for field in self: 8496 nfk = same_db or not field.type.startswith('reference') 8497 clones.append(field.clone( 8498 unique=False, type=field.type if nfk else 'bigint')) 8499 archive_db.define_table( 8500 archive_name, 8501 Field(current_record,field_type,label=current_record_label), 8502 *clones,**dict(format=self._format)) 8503 8504 self._before_update.append( 8505 lambda qset,fs,db=archive_db,an=archive_name,cn=current_record: 8506 archive_record(qset,fs,db[an],cn)) 8507 if is_active and is_active in fieldnames: 8508 self._before_delete.append( 8509 lambda qset: qset.update(is_active=False)) 8510 newquery = lambda query, t=self, name=self._tablename: \ 8511 reduce(AND,[db[tn].is_active == True 8512 for tn in db._adapter.tables(query) 8513 if tn==name or getattr(db[tn],'_ot',None)==name]) 8514 query = self._common_filter 8515 if query: 8516 newquery = query & newquery 8517 self._common_filter = newquery
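### sketch (editorial, not part of dal.py): once versioning is enabled,
### updates archive the previous row into '<tablename>_archive' and, when an
### 'is_active' field exists, deletes become soft-deletes
>>> doc = db.define_table('doc', Field('body'),
...                       Field('is_active', 'boolean', default=True))
>>> doc._enable_record_versioning()
>>> 'doc_archive' in db.tables
True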
8518
8519 - def _validate(self,**vars):
8520 errors = Row() 8521 for key,value in vars.iteritems(): 8522 value,error = self[key].validate(value) 8523 if error: 8524 errors[key] = error 8525 return errors
8526
8527 - def _create_references(self):
8528 db = self._db 8529 pr = db._pending_references 8530 self._referenced_by = [] 8531 self._references = [] 8532 for field in self: 8533 fieldname = field.name 8534 field_type = field.type 8535 if isinstance(field_type,str) and field_type[:10] == 'reference ': 8536 ref = field_type[10:].strip() 8537 if not ref: 8538 raise SyntaxError('Table: reference to nothing: %s' % ref) 8539 if '.' in ref: 8540 rtablename, throw_it,rfieldname = ref.partition('.') 8541 else: 8542 rtablename, rfieldname = ref, None 8543 if not rtablename in db: 8544 pr[rtablename] = pr.get(rtablename,[]) + [field] 8545 continue 8546 rtable = db[rtablename] 8547 if rfieldname: 8548 if not hasattr(rtable,'_primarykey'): 8549 raise SyntaxError( 8550 'keyed tables can only reference other keyed tables (for now)') 8551 if rfieldname not in rtable.fields: 8552 raise SyntaxError( 8553 "invalid field '%s' for referenced table '%s' in table '%s'" \ 8554 % (rfieldname, rtablename, self._tablename)) 8555 rfield = rtable[rfieldname] 8556 else: 8557 rfield = rtable._id 8558 rtable._referenced_by.append(field) 8559 field.referent = rfield 8560 self._references.append(field) 8561 else: 8562 field.referent = None 8563 for referee in pr.get(self._tablename,[]): 8564 self._referenced_by.append(referee)
8565
8566 - def _filter_fields(self, record, id=False):
8567 return dict([(k, v) for (k, v) in record.iteritems() if k 8568 in self.fields and (self[k].type!='id' or id)])
8569
8570 - def _build_query(self,key):
8571 """ for keyed table only """ 8572 query = None 8573 for k,v in key.iteritems(): 8574 if k in self._primarykey: 8575 if query: 8576 query = query & (self[k] == v) 8577 else: 8578 query = (self[k] == v) 8579 else: 8580 raise SyntaxError( 8581 'Field %s is not part of the primary key of %s' % \ 8582 (k,self._tablename)) 8583 return query
8584
8585 - def __getitem__(self, key):
8586 if not key: 8587 return None 8588 elif isinstance(key, dict): 8589 """ for keyed table """ 8590 query = self._build_query(key) 8591 return self._db(query).select(limitby=(0,1), orderby_on_limitby=False).first() 8592 elif str(key).isdigit() or 'google' in DRIVERS and isinstance(key, Key): 8593 return self._db(self._id == key).select(limitby=(0,1), orderby_on_limitby=False).first() 8594 elif key: 8595 return ogetattr(self, str(key))
8596
8597 - def __call__(self, key=DEFAULT, **kwargs):
8598 for_update = kwargs.get('_for_update',False) 8599 if '_for_update' in kwargs: del kwargs['_for_update'] 8600 8601 orderby = kwargs.get('_orderby',None) 8602 if '_orderby' in kwargs: del kwargs['_orderby'] 8603 8604 if not key is DEFAULT: 8605 if isinstance(key, Query): 8606 record = self._db(key).select( 8607 limitby=(0,1),for_update=for_update, orderby=orderby, orderby_on_limitby=False).first() 8608 elif not str(key).isdigit(): 8609 record = None 8610 else: 8611 record = self._db(self._id == key).select( 8612 limitby=(0,1),for_update=for_update, orderby=orderby, orderby_on_limitby=False).first() 8613 if record: 8614 for k,v in kwargs.iteritems(): 8615 if record[k]!=v: return None 8616 return record 8617 elif kwargs: 8618 query = reduce(lambda a,b:a&b,[self[k]==v for k,v in kwargs.iteritems()]) 8619 return self._db(query).select(limitby=(0,1),for_update=for_update, orderby=orderby, orderby_on_limitby=False).first() 8620 else: 8621 return None
8622
8623 - def __setitem__(self, key, value):
8624 if isinstance(key, dict) and isinstance(value, dict): 8625 """ option for keyed table """ 8626 if set(key.keys()) == set(self._primarykey): 8627 value = self._filter_fields(value) 8628 kv = {} 8629 kv.update(value) 8630 kv.update(key) 8631 if not self.insert(**kv): 8632 query = self._build_query(key) 8633 self._db(query).update(**self._filter_fields(value)) 8634 else: 8635 raise SyntaxError( 8636 'key must have all fields from primary key: %s'%\ 8637 (self._primarykey)) 8638 elif str(key).isdigit(): 8639 if key == 0: 8640 self.insert(**self._filter_fields(value)) 8641 elif self._db(self._id == key)\ 8642 .update(**self._filter_fields(value)) is None: 8643 raise SyntaxError('No such record: %s' % key) 8644 else: 8645 if isinstance(key, dict): 8646 raise SyntaxError( 8647 'value must be a dictionary: %s' % value) 8648 osetattr(self, str(key), value)
8649 8650 __getattr__ = __getitem__ 8651
8652 - def __setattr__(self, key, value):
8653 if key[:1]!='_' and key in self: 8654 raise SyntaxError('Object exists and cannot be redefined: %s' % key) 8655 osetattr(self,key,value)
8656
8657 - def __delitem__(self, key):
8658 if isinstance(key, dict): 8659 query = self._build_query(key) 8660 if not self._db(query).delete(): 8661 raise SyntaxError('No such record: %s' % key) 8662 elif not str(key).isdigit() or \ 8663 not self._db(self._id == key).delete(): 8664 raise SyntaxError('No such record: %s' % key)
8665
8666 - def __contains__(self,key):
8667 return hasattr(self,key)
8668 8669 has_key = __contains__ 8670
8671 - def items(self):
8672 return self.__dict__.items()
8673
8674 - def __iter__(self):
8675 for fieldname in self.fields: 8676 yield self[fieldname]
8677
8678 - def iteritems(self):
8679 return self.__dict__.iteritems()
8680 8681
8682 - def __repr__(self):
8683 return '<Table %s (%s)>' % (self._tablename,','.join(self.fields()))
8684
8685 - def __str__(self):
8686 if self._ot is not None: 8687 ot = self._db._adapter.QUOTE_TEMPLATE % self._ot 8688 if 'Oracle' in str(type(self._db._adapter)): 8689 return '%s %s' % (ot, self._tablename) 8690 return '%s AS %s' % (ot, self._tablename) 8691 return self._tablename
8692
8693 - def _drop(self, mode = ''):
8694 return self._db._adapter._drop(self, mode)
8695
8696 - def drop(self, mode = ''):
8697 return self._db._adapter.drop(self,mode)
8698
8699 - def _listify(self,fields,update=False):
8700 new_fields = {} # format: new_fields[name] = (field,value) 8701 8702 # store all fields passed as input in new_fields 8703 for name in fields: 8704 if not name in self.fields: 8705 if name != 'id': 8706 raise SyntaxError( 8707 'Field %s does not belong to the table' % name) 8708 else: 8709 field = self[name] 8710 value = fields[name] 8711 if field.filter_in: 8712 value = field.filter_in(value) 8713 new_fields[name] = (field,value) 8714 8715 # check all fields that should be in the table but are not passed 8716 to_compute = [] 8717 for ofield in self: 8718 name = ofield.name 8719 if not name in new_fields: 8720 # if field is supposed to be computed, compute it! 8721 if ofield.compute: # save those to compute for later 8722 to_compute.append((name,ofield)) 8723 # if field is required, check its default value 8724 elif not update and not ofield.default is None: 8725 value = ofield.default 8726 fields[name] = value 8727 new_fields[name] = (ofield,value) 8728 # if this is an update, use the update value instead 8729 elif update and not ofield.update is None: 8730 value = ofield.update 8731 fields[name] = value 8732 new_fields[name] = (ofield,value) 8733 # if the field is still not there but it should be, error 8734 elif not update and ofield.required: 8735 raise RuntimeError( 8736 'Table: missing required field: %s' % name) 8737 # now deal with fields that are supposed to be computed 8738 if to_compute: 8739 row = Row(fields) 8740 for name,ofield in to_compute: 8741 # try to compute it 8742 try: 8743 row[name] = new_value = ofield.compute(row) 8744 new_fields[name] = (ofield, new_value) 8745 except (KeyError, AttributeError): 8746 # fail silently unless the field is required! 8747 if ofield.required: 8748 raise SyntaxError('unable to compute field: %s' % name) 8749 return new_fields.values()
8750
8751 - def _attempt_upload(self, fields):
8752 for field in self: 8753 if field.type=='upload' and field.name in fields: 8754 value = fields[field.name] 8755 if value is not None and not isinstance(value,str): 8756 if hasattr(value,'file') and hasattr(value,'filename'): 8757 new_name = field.store(value.file,filename=value.filename) 8758 elif hasattr(value,'read') and hasattr(value,'name'): 8759 new_name = field.store(value,filename=value.name) 8760 else: 8761 raise RuntimeError("Unable to handle upload") 8762 fields[field.name] = new_name
8763
8764 - def _defaults(self, fields):
8765 "If there are no fields/values specified, return table defaults" 8766 if not fields: 8767 fields = {} 8768 for field in self: 8769 if field.type != "id": 8770 fields[field.name] = field.default 8771 return fields
8772
8773 - def _insert(self, **fields):
8774 fields = self._defaults(fields) 8775 return self._db._adapter._insert(self, self._listify(fields))
8776
8777 - def insert(self, **fields):
8778 fields = self._defaults(fields) 8779 self._attempt_upload(fields) 8780 if any(f(fields) for f in self._before_insert): return 0 8781 ret = self._db._adapter.insert(self, self._listify(fields)) 8782 if ret and self._after_insert: 8783 fields = Row(fields) 8784 [f(fields,ret) for f in self._after_insert] 8785 return ret
8786
8787 - def validate_and_insert(self,**fields):
8788 response = Row() 8789 response.errors = Row() 8790 new_fields = copy.copy(fields) 8791 for key,value in fields.iteritems(): 8792 value,error = self[key].validate(value) 8793 if error: 8794 response.errors[key] = "%s" % error 8795 else: 8796 new_fields[key] = value 8797 if not response.errors: 8798 response.id = self.insert(**new_fields) 8799 else: 8800 response.id = None 8801 return response
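### sketch (editorial, not part of dal.py): validate_and_insert runs each
### field's requires chain before inserting; on failure response.id is None
### and response.errors is populated
>>> response = db.person.validate_and_insert(name='Tim')
>>> response.id is not None and not response.errors
True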
8802
8803 - def validate_and_update(self, _key=DEFAULT, **fields):
8804 response = Row() 8805 response.errors = Row() 8806 new_fields = copy.copy(fields) 8807 8808 for key,value in fields.iteritems(): 8809 value,error = self[key].validate(value) 8810 if error: 8811 response.errors[key] = "%s" % error 8812 else: 8813 new_fields[key] = value 8814 8815 if _key is DEFAULT: 8816 record = self(**fields) 8817 elif isinstance(_key,dict): 8818 record = self(**_key) 8819 else: 8820 record = self(_key) 8821 8822 if not response.errors and record: 8823 row = self._db(self._id == record[self._id.name]) 8824 response.id = row.update(**new_fields) 8825 else: 8826 response.id = None 8827 return response
8828
8829 - def update_or_insert(self, _key=DEFAULT, **values):
8830 if _key is DEFAULT: 8831 record = self(**values) 8832 elif isinstance(_key,dict): 8833 record = self(**_key) 8834 else: 8835 record = self(_key) 8836 if record: 8837 record.update_record(**values) 8838 newid = None 8839 else: 8840 newid = self.insert(**values) 8841 return newid
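### sketch (editorial, not part of dal.py): update_or_insert returns the new
### id on insert and None when an existing record matched and was updated
>>> new_id = db.person.update_or_insert(name='Ada')
>>> print db.person.update_or_insert(db.person.name == 'Ada', name='Ada')
None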
8842
8843 - def bulk_insert(self, items):
8844 """ 8845 here items is a list of dictionaries 8846 """ 8847 items = [self._listify(item) for item in items] 8848 if any(f(item) for item in items for f in self._before_insert):return 0 8849 ret = self._db._adapter.bulk_insert(self,items) 8850 ret and [[f(item,ret[k]) for k,item in enumerate(items)] for f in self._after_insert] 8851 return ret
8852
8853 - def _truncate(self, mode = None):
8854 return self._db._adapter._truncate(self, mode)
8855
8856 - def truncate(self, mode = None):
8857 return self._db._adapter.truncate(self, mode)
8858
8859 - def import_from_csv_file( 8860 self, 8861 csvfile, 8862 id_map=None, 8863 null='<NULL>', 8864 unique='uuid', 8865 id_offset=None, # id_offset used only when id_map is None 8866 *args, **kwargs 8867 ):
8868 """ 8869 Import records from csv file. 8870 Column headers must have same names as table fields. 8871 Field 'id' is ignored. 8872 If column names read 'table.file' the 'table.' prefix is ignored. 8873 'unique' argument is a field which must be unique 8874 (typically a uuid field) 8875 'restore' argument is default False; 8876 if set True will remove old values in table first. 8877 'id_map' if set to None will not map ids. 8878 The import will keep the id numbers in the restored table. 8879 This assumes that there is an field of type id that 8880 is integer and in incrementing order. 8881 Will keep the id numbers in restored table. 8882 """ 8883 8884 delimiter = kwargs.get('delimiter', ',') 8885 quotechar = kwargs.get('quotechar', '"') 8886 quoting = kwargs.get('quoting', csv.QUOTE_MINIMAL) 8887 restore = kwargs.get('restore', False) 8888 if restore: 8889 self._db[self].truncate() 8890 8891 reader = csv.reader(csvfile, delimiter=delimiter, 8892 quotechar=quotechar, quoting=quoting) 8893 colnames = None 8894 if isinstance(id_map, dict): 8895 if not self._tablename in id_map: 8896 id_map[self._tablename] = {} 8897 id_map_self = id_map[self._tablename] 8898 8899 def fix(field, value, id_map, id_offset): 8900 list_reference_s='list:reference' 8901 if value == null: 8902 value = None 8903 elif field.type=='blob': 8904 value = base64.b64decode(value) 8905 elif field.type=='double' or field.type=='float': 8906 if not value.strip(): 8907 value = None 8908 else: 8909 value = float(value) 8910 elif field.type in ('integer','bigint'): 8911 if not value.strip(): 8912 value = None 8913 else: 8914 value = long(value) 8915 elif field.type.startswith('list:string'): 8916 value = bar_decode_string(value) 8917 elif field.type.startswith(list_reference_s): 8918 ref_table = field.type[len(list_reference_s):].strip() 8919 if id_map is not None: 8920 value = [id_map[ref_table][long(v)] \ 8921 for v in bar_decode_string(value)] 8922 else: 8923 value = [v for v in bar_decode_string(value)] 8924 elif field.type.startswith('list:'): 8925 value = bar_decode_integer(value) 8926 elif id_map and field.type.startswith('reference'): 8927 try: 8928 value = id_map[field.type[9:].strip()][long(value)] 8929 except KeyError: 8930 pass 8931 elif id_offset and field.type.startswith('reference'): 8932 try: 8933 value = id_offset[field.type[9:].strip()]+long(value) 8934 except KeyError: 8935 pass 8936 return (field.name, value)
8937 8938 def is_id(colname): 8939 if colname in self: 8940 return self[colname].type == 'id' 8941 else: 8942 return False 8943 8944 first = True 8945 unique_idx = None 8946 for lineno, line in enumerate(reader): 8947 if not line: 8948 break 8949 if not colnames: 8950 # assume this is the first line of the input, containing the colnames 8951 colnames = [x.split('.',1)[-1] for x in line][:len(line)] 8952 cols, cid = [], None 8953 for i,colname in enumerate(colnames): 8954 if is_id(colname): 8955 cid = i 8956 elif colname in self.fields: 8957 cols.append((i,self[colname])) 8958 if colname == unique: 8959 unique_idx = i 8960 else: 8961 # every other line contains data instead 8962 items = [] 8963 for i, field in cols: 8964 try: 8965 items.append(fix(field, line[i], id_map, id_offset)) 8966 except ValueError: 8967 raise RuntimeError("Unable to parse line:%s field:%s value:'%s'" 8968 % (lineno+1,field,line[i])) 8969 8970 if not (id_map or cid is None or id_offset is None or unique_idx): 8971 csv_id = long(line[cid]) 8972 curr_id = self.insert(**dict(items)) 8973 if first: 8974 first = False 8975 # if the first curr_id is bigger than csv_id 8976 # then we are not restoring but rather 8977 # extending the db table with the csv table 8978 id_offset[self._tablename] = (curr_id-csv_id) \ 8979 if curr_id>csv_id else 0 8980 # create new ids until we get the same as old_id+offset 8981 while curr_id<csv_id+id_offset[self._tablename]: 8982 self._db(self._db[self][colnames[cid]] == curr_id).delete() 8983 curr_id = self.insert(**dict(items)) 8984 # Validation. Check for a duplicate of 'unique' and, 8985 # if present, update instead of insert. 8986 elif not unique_idx: 8987 new_id = self.insert(**dict(items)) 8988 else: 8989 unique_value = line[unique_idx] 8990 query = self._db[self][unique] == unique_value 8991 record = self._db(query).select().first() 8992 if record: 8993 record.update_record(**dict(items)) 8994 new_id = record[self._id.name] 8995 else: 8996 new_id = self.insert(**dict(items)) 8997 if id_map and cid is not None: 8998 id_map_self[long(line[cid])] = new_id 8999
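### sketch (editorial, not part of dal.py): per-table import; the header row
### names the fields, and a 'uuid' column (the default 'unique' argument)
### lets a re-import update matching rows instead of duplicating them
>>> from cStringIO import StringIO
>>> db.person.import_from_csv_file(StringIO('name\r\nCarl\r\n'))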
9000 - def as_dict(self, flat=False, sanitize=True):
9001 table_as_dict = dict(tablename=str(self), fields=[], 9002 sequence_name=self._sequence_name, 9003 trigger_name=self._trigger_name, 9004 common_filter=self._common_filter, format=self._format, 9005 singular=self._singular, plural=self._plural) 9006 9007 for field in self: 9008 if (field.readable or field.writable) or (not sanitize): 9009 table_as_dict["fields"].append(field.as_dict( 9010 flat=flat, sanitize=sanitize)) 9011 return table_as_dict
9012
9013 - def as_xml(self, sanitize=True):
9014 if not have_serializers: 9015 raise ImportError("No xml serializers available") 9016 d = self.as_dict(flat=True, sanitize=sanitize) 9017 return serializers.xml(d)
9018
9019 - def as_json(self, sanitize=True):
9020 if not have_serializers: 9021 raise ImportError("No json serializers available") 9022 d = self.as_dict(flat=True, sanitize=sanitize) 9023 return serializers.json(d)
9024
9025 - def as_yaml(self, sanitize=True):
9026 if not have_serializers: 9027 raise ImportError("No YAML serializers available") 9028 d = self.as_dict(flat=True, sanitize=sanitize) 9029 return serializers.yaml(d)
9030
9031 - def with_alias(self, alias):
9032 return self._db._adapter.alias(self,alias)
9033
9034 - def on(self, query):
9035 return Expression(self._db,self._db._adapter.ON,self,query)
9036
9037 -def archive_record(qset,fs,archive_table,current_record):
9038 tablenames = qset.db._adapter.tables(qset.query) 9039 if len(tablenames)!=1: raise RuntimeError("cannot update join") 9040 table = qset.db[tablenames[0]] 9041 for row in qset.select(): 9042 fields = archive_table._filter_fields(row) 9043 fields[current_record] = row.id 9044 archive_table.insert(**fields) 9045 return False
9046
9047 9048 9049 -class Expression(object):
9050
9051 - def __init__( 9052 self, 9053 db, 9054 op, 9055 first=None, 9056 second=None, 9057 type=None, 9058 **optional_args 9059 ):
9060 9061 self.db = db 9062 self.op = op 9063 self.first = first 9064 self.second = second 9065 self._table = getattr(first,'_table',None) 9066 ### self._tablename = first._tablename ## CHECK 9067 if not type and first and hasattr(first,'type'): 9068 self.type = first.type 9069 else: 9070 self.type = type 9071 self.optional_args = optional_args
9072
9073 - def sum(self):
9074 db = self.db 9075 return Expression(db, db._adapter.AGGREGATE, self, 'SUM', self.type)
9076
9077 - def max(self):
9078 db = self.db 9079 return Expression(db, db._adapter.AGGREGATE, self, 'MAX', self.type)
9080
9081 - def min(self):
9082 db = self.db 9083 return Expression(db, db._adapter.AGGREGATE, self, 'MIN', self.type)
9084
9085 - def len(self):
9086 db = self.db 9087 return Expression(db, db._adapter.LENGTH, self, None, 'integer')
9088
9089 - def avg(self):
9090 db = self.db 9091 return Expression(db, db._adapter.AGGREGATE, self, 'AVG', self.type)
9092
9093 - def abs(self):
9094 db = self.db 9095 return Expression(db, db._adapter.AGGREGATE, self, 'ABS', self.type)
9096
9097 - def lower(self):
9098 db = self.db 9099 return Expression(db, db._adapter.LOWER, self, None, self.type)
9100
9101 - def upper(self):
9102 db = self.db 9103 return Expression(db, db._adapter.UPPER, self, None, self.type)
9104
9105 - def replace(self,a,b):
9106 db = self.db 9107 return Expression(db, db._adapter.REPLACE, self, (a,b), self.type)
9108
9109 - def year(self):
9110 db = self.db 9111 return Expression(db, db._adapter.EXTRACT, self, 'year', 'integer')
9112
9113 - def month(self):
9114 db = self.db 9115 return Expression(db, db._adapter.EXTRACT, self, 'month', 'integer')
9116
9117 - def day(self):
9118 db = self.db 9119 return Expression(db, db._adapter.EXTRACT, self, 'day', 'integer')
9120
9121 - def hour(self):
9122 db = self.db 9123 return Expression(db, db._adapter.EXTRACT, self, 'hour', 'integer')
9124
9125 - def minutes(self):
9126 db = self.db 9127 return Expression(db, db._adapter.EXTRACT, self, 'minute', 'integer')
9128
9129 - def coalesce(self,*others):
9130 db = self.db 9131 return Expression(db, db._adapter.COALESCE, self, others, self.type)
9132
9133 - def coalesce_zero(self):
9134 db = self.db 9135 return Expression(db, db._adapter.COALESCE_ZERO, self, None, self.type)
9136
9137 - def seconds(self):
9138 db = self.db 9139 return Expression(db, db._adapter.EXTRACT, self, 'second', 'integer')
9140
9141 - def epoch(self):
9142 db = self.db 9143 return Expression(db, db._adapter.EPOCH, self, None, 'integer')
9144
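### sketch (editorial, not part of dal.py): the helpers above build SQL
### expressions lazily; aggregates and date parts are computed by the
### backend at select time
>>> sale = db.define_table('sale', Field('amount', 'double'),
...                        Field('sold_on', 'datetime'))
>>> total = sale.amount.sum()
>>> row = db(sale.sold_on.year() == 2012).select(total).first()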
9145 - def __getslice__(self, start, stop):
9146 db = self.db 9147 if start < 0: 9148 pos0 = '(%s - %d)' % (self.len(), abs(start) - 1) 9149 else: 9150 pos0 = start + 1 9151 9152 if stop < 0: 9153 length = '(%s - %d - %s)' % (self.len(), abs(stop) - 1, pos0) 9154 elif stop == sys.maxint: 9155 length = self.len() 9156 else: 9157 length = '(%s - %s)' % (stop + 1, pos0) 9158 return Expression(db,db._adapter.SUBSTRING, 9159 self, (pos0, length), self.type)
9160
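### sketch (editorial, not part of dal.py): slicing an expression becomes a
### SUBSTR, so tests on leading characters run inside the database
### (startswith is the more idiomatic spelling for this particular case)
>>> n_with_m = db(db.person.name[:1] == 'M').count()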
9161 - def __getitem__(self, i):
9162 return self[i:i + 1]
9163
9164 - def __str__(self):
9165 return self.db._adapter.expand(self,self.type)
9166
9167 - def __or__(self, other): # for use in sortby
9168 db = self.db 9169 return Expression(db,db._adapter.COMMA,self,other,self.type)
9170
9171 - def __invert__(self):
9172 db = self.db 9173 if hasattr(self,'op') and self.op == db._adapter.INVERT: 9174 return self.first 9175 return Expression(db,db._adapter.INVERT,self,type=self.type)
9176
9177 - def __add__(self, other):
9178 db = self.db 9179 return Expression(db,db._adapter.ADD,self,other,self.type)
9180
9181 - def __sub__(self, other):
9182 db = self.db 9183 if self.type in ('integer','bigint'): 9184 result_type = 'integer' 9185 elif self.type in ['date','time','datetime','double','float']: 9186 result_type = 'double' 9187 elif self.type.startswith('decimal('): 9188 result_type = self.type 9189 else: 9190 raise SyntaxError("subtraction operation not supported for type") 9191 return Expression(db,db._adapter.SUB,self,other,result_type)
9192
9193 - def __mul__(self, other):
9194 db = self.db 9195 return Expression(db,db._adapter.MUL,self,other,self.type)
9196
9197 - def __div__(self, other):
9198 db = self.db 9199 return Expression(db,db._adapter.DIV,self,other,self.type)
9200
9201 - def __mod__(self, other):
9202 db = self.db 9203 return Expression(db,db._adapter.MOD,self,other,self.type)
9204
9205 - def __eq__(self, value):
9206 db = self.db 9207 return Query(db, db._adapter.EQ, self, value)
9208
9209 - def __ne__(self, value):
9210 db = self.db 9211 return Query(db, db._adapter.NE, self, value)
9212
9213 - def __lt__(self, value):
9214 db = self.db 9215 return Query(db, db._adapter.LT, self, value)
9216
9217 - def __le__(self, value):
9218 db = self.db 9219 return Query(db, db._adapter.LE, self, value)
9220
9221 - def __gt__(self, value):
9222 db = self.db 9223 return Query(db, db._adapter.GT, self, value)
9224
9225 - def __ge__(self, value):
9226 db = self.db 9227 return Query(db, db._adapter.GE, self, value)
9228
9229 - def like(self, value, case_sensitive=False):
9230 db = self.db 9231 op = case_sensitive and db._adapter.LIKE or db._adapter.ILIKE 9232 return Query(db, op, self, value)
9233
9234 - def regexp(self, value):
9235 db = self.db 9236 return Query(db, db._adapter.REGEXP, self, value)
9237
9238 - def belongs(self, *value, **kwattr):
9239 """ 9240 Accepts the following inputs: 9241 field.belongs(1,2) 9242 field.belongs((1,2)) 9243 field.belongs(query) 9244 9245 Does NOT accept: 9246 field.belongs(1) 9247 """ 9248 db = self.db 9249 if len(value) == 1: 9250 value = value[0] 9251 if isinstance(value,Query): 9252 value = db(value)._select(value.first._table._id) 9253 elif not isinstance(value, basestring): 9254 value = set(value) 9255 if kwattr.get('null') and None in value: 9256 value.remove(None) 9257 return (self == None) | Query(db, db._adapter.BELONGS, self, value) 9258 return Query(db, db._adapter.BELONGS, self, value)
9259
9260 - def startswith(self, value):
9261 db = self.db 9262 if not self.type in ('string', 'text', 'json', 'upload'): 9263 raise SyntaxError("startswith used with incompatible field type") 9264 return Query(db, db._adapter.STARTSWITH, self, value)
9265
9266 - def endswith(self, value):
9267 db = self.db 9268 if not self.type in ('string', 'text', 'json', 'upload'): 9269 raise SyntaxError("endswith used with incompatible field type") 9270 return Query(db, db._adapter.ENDSWITH, self, value)
9271
9272 - def contains(self, value, all=False, case_sensitive=False):
9273 """ 9274 The case_sensitive parameters is only useful for PostgreSQL 9275 For other RDMBs it is ignored and contains is always case in-sensitive 9276 For MongoDB and GAE contains is always case sensitive 9277 """ 9278 db = self.db 9279 if isinstance(value,(list, tuple)): 9280 subqueries = [self.contains(str(v).strip(),case_sensitive=case_sensitive) 9281 for v in value if str(v).strip()] 9282 if not subqueries: 9283 return self.contains('') 9284 else: 9285 return reduce(all and AND or OR,subqueries) 9286 if not self.type in ('string', 'text', 'json', 'upload') and not self.type.startswith('list:'): 9287 raise SyntaxError("contains used with incompatible field type") 9288 return Query(db, db._adapter.CONTAINS, self, value, case_sensitive=case_sensitive)
9289
9290 - def with_alias(self, alias):
9291 db = self.db 9292 return Expression(db, db._adapter.AS, self, alias, self.type)
9293 9294 # GIS expressions 9295
9296 - def st_asgeojson(self, precision=15, options=0, version=1):
9297 return Expression(self.db, self.db._adapter.ST_ASGEOJSON, self, 9298 dict(precision=precision, options=options, 9299 version=version), 'string')
9300
9301 - def st_astext(self):
9302 db = self.db 9303 return Expression(db, db._adapter.ST_ASTEXT, self, type='string')
9304
9305 - def st_x(self):
9306 db = self.db 9307 return Expression(db, db._adapter.ST_X, self, type='string')
9308
9309 - def st_y(self):
9310 db = self.db 9311 return Expression(db, db._adapter.ST_Y, self, type='string')
9312
9313 - def st_distance(self, other):
9314 db = self.db 9315 return Expression(db,db._adapter.ST_DISTANCE,self,other, 'double')
9316
9317 - def st_simplify(self, value):
9318 db = self.db 9319 return Expression(db, db._adapter.ST_SIMPLIFY, self, value, self.type)
9320 9321 # GIS queries 9322
9323 - def st_contains(self, value):
9324 db = self.db 9325 return Query(db, db._adapter.ST_CONTAINS, self, value)
9326
9327 - def st_equals(self, value):
9328 db = self.db 9329 return Query(db, db._adapter.ST_EQUALS, self, value)
9330
9331 - def st_intersects(self, value):
9332 db = self.db 9333 return Query(db, db._adapter.ST_INTERSECTS, self, value)
9334
9335 - def st_overlaps(self, value):
9336 db = self.db 9337 return Query(db, db._adapter.ST_OVERLAPS, self, value)
9338
9339 - def st_touches(self, value):
9340 db = self.db 9341 return Query(db, db._adapter.ST_TOUCHES, self, value)
9342
9343 - def st_within(self, value):
9344 db = self.db 9345 return Query(db, db._adapter.ST_WITHIN, self, value)
9346
9347 # for use in both Query and sortby 9348 9349 9350 -class SQLCustomType(object):
9351 """ 9352 allows defining of custom SQL types 9353 9354 Example:: 9355 9356 decimal = SQLCustomType( 9357 type ='double', 9358 native ='integer', 9359 encoder =(lambda x: int(float(x) * 100)), 9360 decoder = (lambda x: Decimal("0.00") + Decimal(str(float(x)/100)) ) 9361 ) 9362 9363 db.define_table( 9364 'example', 9365 Field('value', type=decimal) 9366 ) 9367 9368 :param type: the web2py type (default = 'string') 9369 :param native: the backend type 9370 :param encoder: how to encode the value to store it in the backend 9371 :param decoder: how to decode the value retrieved from the backend 9372 :param validator: what validators to use ( default = None, will use the 9373 default validator for type) 9374 """ 9375
9376 - def __init__( 9377 self, 9378 type='string', 9379 native=None, 9380 encoder=None, 9381 decoder=None, 9382 validator=None, 9383 _class=None, 9384 ):
9385 9386 self.type = type 9387 self.native = native 9388 self.encoder = encoder or (lambda x: x) 9389 self.decoder = decoder or (lambda x: x) 9390 self.validator = validator 9391 self._class = _class or type
9392
9393 - def startswith(self, text=None):
9394 try: 9395 return self.type.startswith(text) 9396 except TypeError: 9397 return False
9398
9399 - def __getslice__(self, a=0, b=100):
9400 return None
9401
9402 - def __getitem__(self, i):
9403 return None
9404
9405 - def __str__(self):
9406 return self._class
9407
9408 -class FieldVirtual(object):
9409 - def __init__(self, name, f=None, ftype='string',label=None,table_name=None):
9410 # for backward compatibility 9411 (self.name, self.f) = (name, f) if f else ('unknown', name) 9412 self.type = ftype 9413 self.label = label or self.name.capitalize().replace('_',' ') 9414 self.represent = lambda v,r:v 9415 self.formatter = IDENTITY 9416 self.comment = None 9417 self.readable = True 9418 self.writable = False 9419 self.requires = None 9420 self.widget = None 9421 self.tablename = table_name 9422 self.filter_out = None
9423 - def __str__(self):
9424 return '%s.%s' % (self.tablename, self.name)
9425
9426 -class FieldMethod(object):
9427 - def __init__(self, name, f=None, handler=None):
9428 # for backward compatibility 9429 (self.name, self.f) = (name, f) if f else ('unknown', name) 9430 self.handler = handler
9431
9432 -def list_represent(x,r=None):
9433 return ', '.join(str(y) for y in x or [])
9434
9435 -class Field(Expression):
9436 9437 Virtual = FieldVirtual 9438 Method = FieldMethod 9439 Lazy = FieldMethod # for backward compatibility 9440 9441 """ 9442 an instance of this class represents a database field 9443 9444 example:: 9445 9446 a = Field(name, 'string', length=32, default=None, required=False, 9447 requires=IS_NOT_EMPTY(), ondelete='CASCADE', 9448 notnull=False, unique=False, 9449 widget=None, label=None, comment=None, 9450 uploadfield=True, # True means store on disk, 9451 # 'a_field_name' means store in this field in db 9452 # False means file content will be discarded. 9453 writable=True, readable=True, update=None, authorize=None, 9454 autodelete=False, represent=None, uploadfolder=None, 9455 uploadseparate=False, # upload to separate directories by uuid_keys 9456 # first 2 characters and tablename.fieldname 9457 # False - old behavior 9458 # True - put uploaded file in 9459 # <uploaddir>/<tablename>.<fieldname>/uuid_key[:2] 9460 # directory 9461 uploadfs=None) # a pyfilesystem where to store the upload 9462 9463 to be used as argument of DAL.define_table 9464 9465 allowed field types: 9466 string, boolean, integer, double, text, blob, 9467 date, time, datetime, upload, password 9468 9469 """ 9470
9471 - def __init__( 9472 self, 9473 fieldname, 9474 type='string', 9475 length=None, 9476 default=DEFAULT, 9477 required=False, 9478 requires=DEFAULT, 9479 ondelete='CASCADE', 9480 notnull=False, 9481 unique=False, 9482 uploadfield=True, 9483 widget=None, 9484 label=None, 9485 comment=None, 9486 writable=True, 9487 readable=True, 9488 update=None, 9489 authorize=None, 9490 autodelete=False, 9491 represent=None, 9492 uploadfolder=None, 9493 uploadseparate=False, 9494 uploadfs=None, 9495 compute=None, 9496 custom_store=None, 9497 custom_retrieve=None, 9498 custom_retrieve_file_properties=None, 9499 custom_delete=None, 9500 filter_in = None, 9501 filter_out = None, 9502 custom_qualifier = None, 9503 map_none = None, 9504 ):
9505 self._db = self.db = None # both for backward compatibility 9506 self.op = None 9507 self.first = None 9508 self.second = None 9509 if isinstance(fieldname, unicode): 9510 try: 9511 fieldname = str(fieldname) 9512 except UnicodeEncodeError: 9513 raise SyntaxError('Field: invalid unicode field name') 9514 self.name = fieldname = cleanup(fieldname) 9515 if not isinstance(fieldname, str) or hasattr(Table, fieldname) or \ 9516 fieldname[0] == '_' or REGEX_PYTHON_KEYWORDS.match(fieldname): 9517 raise SyntaxError('Field: invalid field name: %s' % fieldname) 9518 self.type = type if not isinstance(type, (Table,Field)) else 'reference %s' % type 9519 self.length = length if not length is None else DEFAULTLENGTH.get(self.type,512) 9520 self.default = default if default!=DEFAULT else (update or None) 9521 self.required = required # is this field required 9522 self.ondelete = ondelete.upper() # this is for reference fields only 9523 self.notnull = notnull 9524 self.unique = unique 9525 self.uploadfield = uploadfield 9526 self.uploadfolder = uploadfolder 9527 self.uploadseparate = uploadseparate 9528 self.uploadfs = uploadfs 9529 self.widget = widget 9530 self.comment = comment 9531 self.writable = writable 9532 self.readable = readable 9533 self.update = update 9534 self.authorize = authorize 9535 self.autodelete = autodelete 9536 self.represent = list_represent if \ 9537 represent==None and type in ('list:integer','list:string') else represent 9538 self.compute = compute 9539 self.isattachment = True 9540 self.custom_store = custom_store 9541 self.custom_retrieve = custom_retrieve 9542 self.custom_retrieve_file_properties = custom_retrieve_file_properties 9543 self.custom_delete = custom_delete 9544 self.filter_in = filter_in 9545 self.filter_out = filter_out 9546 self.custom_qualifier = custom_qualifier 9547 self.label = label if label!=None else fieldname.replace('_',' ').title() 9548 self.requires = requires if requires!=None else [] 9549 self.map_none = map_none
9550
9551 - def set_attributes(self,*args,**attributes):
9552 self.__dict__.update(*args,**attributes)
9553
9554 - def clone(self,point_self_references_to=False,**args):
9555 field = copy.copy(self) 9556 if point_self_references_to and \ 9557 field.type == 'reference '+field._tablename: 9558 field.type = 'reference %s' % point_self_references_to 9559 field.__dict__.update(args) 9560 return field
9561
9562 - def store(self, file, filename=None, path=None):
9563 if self.custom_store: 9564 return self.custom_store(file,filename,path) 9565 if isinstance(file, cgi.FieldStorage): 9566 filename = filename or file.filename 9567 file = file.file 9568 elif not filename: 9569 filename = file.name 9570 filename = os.path.basename(filename.replace('/', os.sep)\ 9571 .replace('\\', os.sep)) 9572 m = REGEX_STORE_PATTERN.search(filename) 9573 extension = m and m.group('e') or 'txt' 9574 uuid_key = web2py_uuid().replace('-', '')[-16:] 9575 encoded_filename = base64.b16encode(filename).lower() 9576 newfilename = '%s.%s.%s.%s' % \ 9577 (self._tablename, self.name, uuid_key, encoded_filename) 9578 newfilename = newfilename[:(self.length - 1 - len(extension))] + '.' + extension 9579 self_uploadfield = self.uploadfield 9580 if isinstance(self_uploadfield,Field): 9581 blob_uploadfield_name = self_uploadfield.uploadfield 9582 keys={self_uploadfield.name: newfilename, 9583 blob_uploadfield_name: file.read()} 9584 self_uploadfield.table.insert(**keys) 9585 elif self_uploadfield == True: 9586 if path: 9587 pass 9588 elif self.uploadfolder: 9589 path = self.uploadfolder 9590 elif self.db._adapter.folder: 9591 path = pjoin(self.db._adapter.folder, '..', 'uploads') 9592 else: 9593 raise RuntimeError( 9594 "you must specify a Field(...,uploadfolder=...)") 9595 if self.uploadseparate: 9596 if self.uploadfs: 9597 raise RuntimeError("not supported") 9598 path = pjoin(path,"%s.%s" %(self._tablename, self.name), 9599 uuid_key[:2]) 9600 if not exists(path): 9601 os.makedirs(path) 9602 pathfilename = pjoin(path, newfilename) 9603 if self.uploadfs: 9604 dest_file = self.uploadfs.open(newfilename, 'wb') 9605 else: 9606 dest_file = open(pathfilename, 'wb') 9607 try: 9608 shutil.copyfileobj(file, dest_file) 9609 except IOError: 9610 raise IOError( 9611 'Unable to store file "%s" because invalid permissions, readonly file system, or filename too long' % pathfilename) 9612 dest_file.close() 9613 return newfilename
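### sketch (editorial, not part of dal.py): store() renames the incoming
### file to a safe '<table>.<field>.<uuid>.<b16name>.<ext>' name and copies
### it under uploadfolder; '/tmp' and 'logo.png' are assumed to exist
>>> img = db.define_table('img', Field('picture', 'upload',
...                                    uploadfolder='/tmp'))
>>> stored = img.picture.store(open('logo.png', 'rb'), 'logo.png')
>>> new_id = img.insert(picture=stored)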
9614
9615 - def retrieve(self, name, path=None, nameonly=False):
9616 """ 9617 if nameonly==True return (filename, fullfilename) instead of 9618 (filename, stream) 9619 """ 9620 self_uploadfield = self.uploadfield 9621 if self.custom_retrieve: 9622 return self.custom_retrieve(name, path) 9623 import http 9624 if self.authorize or isinstance(self_uploadfield, str): 9625 row = self.db(self == name).select().first() 9626 if not row: 9627 raise http.HTTP(404) 9628 if self.authorize and not self.authorize(row): 9629 raise http.HTTP(403) 9630 file_properties = self.retrieve_file_properties(name,path) 9631 filename = file_properties['filename'] 9632 if isinstance(self_uploadfield, str): # ## if file is in DB 9633 stream = StringIO.StringIO(row[self_uploadfield] or '') 9634 elif isinstance(self_uploadfield,Field): 9635 blob_uploadfield_name = self_uploadfield.uploadfield 9636 query = self_uploadfield == name 9637 data = self_uploadfield.table(query)[blob_uploadfield_name] 9638 stream = StringIO.StringIO(data) 9639 elif self.uploadfs: 9640 # ## if file is on pyfilesystem 9641 stream = self.uploadfs.open(name, 'rb') 9642 else: 9643 # ## if file is on regular filesystem 9644 # this is intentially a sting with filename and not a stream 9645 # this propagates and allows stream_file_or_304_or_206 to be called 9646 fullname = pjoin(file_properties['path'],name) 9647 if nameonly: 9648 return (filename, fullname) 9649 stream = open(fullname,'rb') 9650 return (filename, stream)
9651
9652 - def retrieve_file_properties(self, name, path=None):
9653 m = REGEX_UPLOAD_PATTERN.match(name) 9654 if not m or not self.isattachment: 9655 raise TypeError('Can\'t retrieve %s file properties' % name) 9656 self_uploadfield = self.uploadfield 9657 if self.custom_retrieve_file_properties: 9658 return self.custom_retrieve_file_properties(name, path) 9659 if m.group('name'): 9660 try: 9661 filename = base64.b16decode(m.group('name'), True) 9662 filename = REGEX_CLEANUP_FN.sub('_', filename) 9663 except (TypeError, AttributeError): 9664 filename = name 9665 else: 9666 filename = name 9667 # ## if file is in DB 9668 if isinstance(self_uploadfield, (str, Field)): 9669 return dict(path=None,filename=filename) 9670 # ## if file is on filesystem 9671 if not path: 9672 if self.uploadfolder: 9673 path = self.uploadfolder 9674 else: 9675 path = pjoin(self.db._adapter.folder, '..', 'uploads') 9676 if self.uploadseparate: 9677 t = m.group('table') 9678 f = m.group('field') 9679 u = m.group('uuidkey') 9680 path = pjoin(path,"%s.%s" % (t,f),u[:2]) 9681 return dict(path=path,filename=filename)
9682 9683
9684 - def formatter(self, value):
9685 requires = self.requires 9686 if value is None or not requires: 9687 return value or self.map_none 9688 if not isinstance(requires, (list, tuple)): 9689 requires = [requires] 9690 elif isinstance(requires, tuple): 9691 requires = list(requires) 9692 else: 9693 requires = copy.copy(requires) 9694 requires.reverse() 9695 for item in requires: 9696 if hasattr(item, 'formatter'): 9697 value = item.formatter(value) 9698 return value
9699
9700 - def validate(self, value):
9701 if not self.requires or self.requires == DEFAULT: 9702 return ((value if value!=self.map_none else None), None) 9703 requires = self.requires 9704 if not isinstance(requires, (list, tuple)): 9705 requires = [requires] 9706 for validator in requires: 9707 (value, error) = validator(value) 9708 if error: 9709 return (value, error) 9710 return ((value if value!=self.map_none else None), None)
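### sketch (editorial, not part of dal.py): validate() runs the field's
### requires chain and returns (value, error); IS_NOT_EMPTY lives in
### gluon.validators
>>> from gluon.validators import IS_NOT_EMPTY
>>> db.person.name.requires = IS_NOT_EMPTY()
>>> value, error = db.person.name.validate('')
>>> error is not None
True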
9711
9712 - def count(self, distinct=None):
9713 return Expression(self.db, self.db._adapter.COUNT, self, distinct, 'integer')
9714
9715 - def as_dict(self, flat=False, sanitize=True):
9716 attrs = ("name", 'authorize', 'represent', 'ondelete', 9717 'custom_store', 'autodelete', 'custom_retrieve', 9718 'filter_out', 'uploadseparate', 'widget', 'uploadfs', 9719 'update', 'custom_delete', 'uploadfield', 'uploadfolder', 9720 'custom_qualifier', 'unique', 'writable', 'compute', 9721 'map_none', 'default', 'type', 'required', 'readable', 9722 'requires', 'comment', 'label', 'length', 'notnull', 9723 'custom_retrieve_file_properties', 'filter_in') 9724 serializable = (int, long, basestring, float, tuple, 9725 bool, type(None)) 9726 9727 def flatten(obj): 9728 if isinstance(obj, dict): 9729 return dict((flatten(k), flatten(v)) for k, v in 9730 obj.items()) 9731 elif isinstance(obj, (tuple, list, set)): 9732 return [flatten(v) for v in obj] 9733 elif isinstance(obj, serializable): 9734 return obj 9735 elif isinstance(obj, (datetime.datetime, 9736 datetime.date, datetime.time)): 9737 return str(obj) 9738 else: 9739 return None
9740 9741 d = dict() 9742 if not (sanitize and not (self.readable or self.writable)): 9743 for attr in attrs: 9744 if flat: 9745 d.update({attr: flatten(getattr(self, attr))}) 9746 else: 9747 d.update({attr: getattr(self, attr)}) 9748 d["fieldname"] = d.pop("name") 9749 return d
9750
9751 - def as_xml(self, sanitize=True):
9752 if have_serializers: 9753 xml = serializers.xml 9754 else: 9755 raise ImportError("No xml serializers available") 9756 d = self.as_dict(flat=True, sanitize=sanitize) 9757 return xml(d)
9758
9759 - def as_json(self, sanitize=True):
9760 if have_serializers: 9761 json = serializers.json 9762 else: 9763 raise ImportError("No json serializers available") 9764 d = self.as_dict(flat=True, sanitize=sanitize) 9765 return json(d)
9766
9767 - def as_yaml(self, sanitize=True):
9768 if have_serializers: 9769 d = self.as_dict(flat=True, sanitize=sanitize) 9770 return serializers.yaml(d) 9771 else: 9772 raise ImportError("No YAML serializers available")
9773
9774 - def __nonzero__(self):
9775 return True
9776
9777 - def __str__(self):
9778 try: 9779 return '%s.%s' % (self.tablename, self.name) 9780 except: 9781 return '<no table>.%s' % self.name
9782
9783 9784 -class Query(object):
9785 9786 """ 9787 a query object necessary to define a set. 9788 it can be stored or can be passed to DAL.__call__() to obtain a Set 9789 9790 Example:: 9791 9792 query = db.users.name=='Max' 9793 set = db(query) 9794 records = set.select() 9795 9796 """ 9797
9798 - def __init__( 9799 self, 9800 db, 9801 op, 9802 first=None, 9803 second=None, 9804 ignore_common_filters = False, 9805 **optional_args 9806 ):
9807 self.db = self._db = db 9808 self.op = op 9809 self.first = first 9810 self.second = second 9811 self.ignore_common_filters = ignore_common_filters 9812 self.optional_args = optional_args
9813
9814 - def __repr__(self):
9815 return '<Query %s>' % BaseAdapter.expand(self.db._adapter,self)
9816
9817 - def __str__(self):
9818 return self.db._adapter.expand(self)
9819
9820 - def __and__(self, other):
9821 return Query(self.db,self.db._adapter.AND,self,other)
9822 9823 __rand__ = __and__ 9824
9825 - def __or__(self, other):
9826 return Query(self.db,self.db._adapter.OR,self,other)
9827 9828 __ror__ = __or__ 9829
9830 - def __invert__(self):
9831 if self.op==self.db._adapter.NOT: 9832 return self.first 9833 return Query(self.db,self.db._adapter.NOT,self)
9834
9835 - def __eq__(self, other):
9836 return repr(self) == repr(other)
9837
9838 - def __ne__(self, other):
9839 return not (self == other)
9840
9841 - def case(self,t=1,f=0):
9842 return self.db._adapter.CASE(self,t,f)
9843
9844 - def as_dict(self, flat=False, sanitize=True):
9845 """Experimental stuff 9846 9847 This allows to return a plain dictionary with the basic 9848 query representation. Can be used with json/xml services 9849 for client-side db I/O 9850 9851 Example: 9852 >>> q = db.auth_user.id != 0 9853 >>> q.as_dict(flat=True) 9854 {"op": "NE", "first":{"tablename": "auth_user", 9855 "fieldname": "id"}, 9856 "second":0} 9857 """ 9858 9859 SERIALIZABLE_TYPES = (tuple, dict, set, list, int, long, float, 9860 basestring, type(None), bool) 9861 def loop(d): 9862 newd = dict() 9863 for k, v in d.items(): 9864 if k in ("first", "second"): 9865 if isinstance(v, self.__class__): 9866 newd[k] = loop(v.__dict__) 9867 elif isinstance(v, Field): 9868 newd[k] = {"tablename": v._tablename, 9869 "fieldname": v.name} 9870 elif isinstance(v, Expression): 9871 newd[k] = loop(v.__dict__) 9872 elif isinstance(v, SERIALIZABLE_TYPES): 9873 newd[k] = v 9874 elif isinstance(v, (datetime.date, 9875 datetime.time, 9876 datetime.datetime)): 9877 newd[k] = unicode(v) 9878 elif k == "op": 9879 if callable(v): 9880 newd[k] = v.__name__ 9881 elif isinstance(v, basestring): 9882 newd[k] = v 9883 else: pass # not callable or string 9884 elif isinstance(v, SERIALIZABLE_TYPES): 9885 if isinstance(v, dict): 9886 newd[k] = loop(v) 9887 else: newd[k] = v 9888 return newd
9889 9890 if flat: 9891 return loop(self.__dict__) 9892 else: return self.__dict__
9893 9894
    def as_xml(self, sanitize=True):
        if have_serializers:
            xml = serializers.xml
        else:
            raise ImportError("No xml serializers available")
        d = self.as_dict(flat=True, sanitize=sanitize)
        return xml(d)

    def as_json(self, sanitize=True):
        if have_serializers:
            json = serializers.json
        else:
            raise ImportError("No json serializers available")
        d = self.as_dict(flat=True, sanitize=sanitize)
        return json(d)

def xorify(orderby):
    if not orderby:
        return None
    orderby2 = orderby[0]
    for item in orderby[1:]:
        orderby2 = orderby2 | item
    return orderby2

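# Illustrative sketch (not part of the original source): xorify folds a
# sequence of orderby expressions into a single expression joined with '|':
#
#     order = xorify([db.person.name, ~db.person.birth])
#     # equivalent to: db.person.name | ~db.person.birth
#     rows = db(db.person).select(orderby=order)
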
def use_common_filters(query):
    return (query and hasattr(query, 'ignore_common_filters') and
            not query.ignore_common_filters)

class Set(object):

    """
    A Set represents a set of records in the database;
    the records are identified by the query=Query(...) object.
    Normally the Set is generated by DAL.__call__(Query(...)).

    Given a set, for example
        set = db(db.users.name=='Max')
    you can:
        set.update(name='Massimo')
        set.delete()  # all elements in the set
        set.select(orderby=db.users.id, groupby=db.users.name, limitby=(0,10))
    and take subsets:
        subset = set(db.users.id<5)
    """

    def __init__(self, db, query, ignore_common_filters=None):
        self.db = db
        self._db = db  # for backward compatibility
        self.dquery = None

        # if query is a dict, parse it
        if isinstance(query, dict):
            query = self.parse(query)

        if ignore_common_filters is not None and \
                use_common_filters(query) == ignore_common_filters:
            query = copy.copy(query)
            query.ignore_common_filters = ignore_common_filters
        self.query = query

    def __repr__(self):
        return '<Set %s>' % BaseAdapter.expand(self.db._adapter, self.query)

    def __call__(self, query, ignore_common_filters=False):
        if query is None:
            return self
        elif isinstance(query, Table):
            query = self.db._adapter.id_query(query)
        elif isinstance(query, str):
            query = Expression(self.db, query)
        elif isinstance(query, Field):
            query = query != None
        if self.query:
            return Set(self.db, self.query & query,
                       ignore_common_filters=ignore_common_filters)
        else:
            return Set(self.db, query,
                       ignore_common_filters=ignore_common_filters)

    def _count(self, distinct=None):
        return self.db._adapter._count(self.query, distinct)

    def _select(self, *fields, **attributes):
        adapter = self.db._adapter
        tablenames = adapter.tables(self.query,
                                    attributes.get('join', None),
                                    attributes.get('left', None),
                                    attributes.get('orderby', None),
                                    attributes.get('groupby', None))
        fields = adapter.expand_all(fields, tablenames)
        return adapter._select(self.query, fields, attributes)

    def _delete(self):
        db = self.db
        tablename = db._adapter.get_table(self.query)
        return db._adapter._delete(tablename, self.query)

    def _update(self, **update_fields):
        db = self.db
        tablename = db._adapter.get_table(self.query)
        fields = db[tablename]._listify(update_fields, update=True)
        return db._adapter._update(tablename, self.query, fields)

    def as_dict(self, flat=False, sanitize=True):
        if flat:
            uid = dbname = uri = None
            codec = self.db._db_codec
            if not sanitize:
                uri, dbname, uid = (self.db._dbname, str(self.db),
                                    self.db._db_uid)
            d = {"query": self.query.as_dict(flat=flat)}
            d["db"] = {"uid": uid, "codec": codec,
                       "name": dbname, "uri": uri}
            return d
        else:
            return self.__dict__

    def as_xml(self, sanitize=True):
        if have_serializers:
            xml = serializers.xml
        else:
            raise ImportError("No xml serializers available")
        d = self.as_dict(flat=True, sanitize=sanitize)
        return xml(d)

    def as_json(self, sanitize=True):
        if have_serializers:
            json = serializers.json
        else:
            raise ImportError("No json serializers available")
        d = self.as_dict(flat=True, sanitize=sanitize)
        return json(d)

    def parse(self, dquery):
        "Experimental: turn a dictionary into a Query object"
        self.dquery = dquery
        return self.build(self.dquery)

    def build(self, d):
        "Experimental: see .parse()"
        op, first, second = (d["op"], d["first"],
                             d.get("second", None))
        left = right = built = None

        if op in ("AND", "OR"):
            if not (type(first), type(second)) == (dict, dict):
                raise SyntaxError("Invalid AND/OR query")
            if op == "AND":
                built = self.build(first) & self.build(second)
            else:
                built = self.build(first) | self.build(second)

        elif op == "NOT":
            if first is None:
                raise SyntaxError("Invalid NOT query")
            built = ~self.build(first)
        else:
            # normal operation (GT, EQ, LT, ...)
            for k, v in {"left": first, "right": second}.items():
                if isinstance(v, dict) and v.get("op"):
                    v = self.build(v)
                if isinstance(v, dict) and ("tablename" in v):
                    v = self.db[v["tablename"]][v["fieldname"]]
                if k == "left":
                    left = v
                else:
                    right = v

            if hasattr(self.db._adapter, op):
                opm = getattr(self.db._adapter, op)

            if op == "EQ": built = left == right
            elif op == "NE": built = left != right
            elif op == "GT": built = left > right
            elif op == "GE": built = left >= right
            elif op == "LT": built = left < right
            elif op == "LE": built = left <= right
            elif op in ("JOIN", "LEFT_JOIN", "RANDOM", "ALLOW_NULL"):
                built = Expression(self.db, opm)
            elif op in ("LOWER", "UPPER", "EPOCH", "PRIMARY_KEY",
                        "COALESCE_ZERO", "RAW", "INVERT"):
                built = Expression(self.db, opm, left)
            elif op in ("COUNT", "EXTRACT", "AGGREGATE", "SUBSTRING",
                        "REGEXP", "LIKE", "ILIKE", "STARTSWITH",
                        "ENDSWITH", "ADD", "SUB", "MUL", "DIV",
                        "MOD", "AS", "ON", "COMMA", "NOT_NULL",
                        "COALESCE", "CONTAINS", "BELONGS"):
                built = Expression(self.db, opm, left, right)
            # expression as string
            elif not (left or right):
                built = Expression(self.db, op)
            else:
                raise SyntaxError("Operator not supported: %s" % op)

        return built

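    # Illustrative sketch (not part of the original source): a flat dict from
    # Query.as_dict can be rebuilt into a Set, e.g. for queries received from
    # a JSON service. Assuming an 'auth_user' table:
    #
    #     d = (db.auth_user.id > 0).as_dict(flat=True)
    #     rows = db(d).select()   # Set.__init__ calls parse() on the dict
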
    def isempty(self):
        return not self.select(limitby=(0, 1), orderby_on_limitby=False)

    def count(self, distinct=None, cache=None):
        db = self.db
        if cache:
            cache_model, time_expire = cache
            sql = self._count(distinct=distinct)
            key = db._uri + '/' + sql
            if len(key) > 200:
                key = hashlib_md5(key).hexdigest()
            return cache_model(
                key,
                (lambda self=self, distinct=distinct:
                 db._adapter.count(self.query, distinct)),
                time_expire)
        return db._adapter.count(self.query, distinct)

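    # Illustrative sketch (not part of the original source): count() accepts a
    # (cache_model, time_expire) pair, e.g. in a web2py app:
    #
    #     n = db(db.person).count(cache=(cache.ram, 3600))
    #
    # which caches the result for an hour under a key derived from the SQL.
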
    def select(self, *fields, **attributes):
        adapter = self.db._adapter
        tablenames = adapter.tables(self.query,
                                    attributes.get('join', None),
                                    attributes.get('left', None),
                                    attributes.get('orderby', None),
                                    attributes.get('groupby', None))
        fields = adapter.expand_all(fields, tablenames)
        return adapter.select(self.query, fields, attributes)

    def nested_select(self, *fields, **attributes):
        return Expression(self.db, self._select(*fields, **attributes))

    def delete(self):
        db = self.db
        tablename = db._adapter.get_table(self.query)
        table = db[tablename]
        if any(f(self) for f in table._before_delete):
            return 0
        ret = db._adapter.delete(tablename, self.query)
        ret and [f(self) for f in table._after_delete]
        return ret

    def update(self, **update_fields):
        db = self.db
        tablename = db._adapter.get_table(self.query)
        table = db[tablename]
        table._attempt_upload(update_fields)
        if any(f(self, update_fields) for f in table._before_update):
            return 0
        fields = table._listify(update_fields, update=True)
        if not fields:
            raise SyntaxError("No fields to update")
        ret = db._adapter.update("%s" % table, self.query, fields)
        ret and [f(self, update_fields) for f in table._after_update]
        return ret

    def update_naive(self, **update_fields):
        """
        Same as update but does not call table._before_update and _after_update
        """
        tablename = self.db._adapter.get_table(self.query)
        table = self.db[tablename]
        fields = table._listify(update_fields, update=True)
        if not fields:
            raise SyntaxError("No fields to update")

        ret = self.db._adapter.update("%s" % table, self.query, fields)
        return ret

    def validate_and_update(self, **update_fields):
        tablename = self.db._adapter.get_table(self.query)
        response = Row()
        response.errors = Row()
        new_fields = copy.copy(update_fields)
        for key, value in update_fields.iteritems():
            value, error = self.db[tablename][key].validate(value)
            if error:
                response.errors[key] = error
            else:
                new_fields[key] = value
        table = self.db[tablename]
        if response.errors:
            response.updated = None
        else:
            if not any(f(self, new_fields) for f in table._before_update):
                fields = table._listify(new_fields, update=True)
                if not fields:
                    raise SyntaxError("No fields to update")
                ret = self.db._adapter.update(tablename, self.query, fields)
                ret and [f(self, new_fields) for f in table._after_update]
            else:
                ret = 0
            response.updated = ret
        return response

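    # Illustrative sketch (not part of the original source): unlike update(),
    # validate_and_update() runs each field's validators first and reports
    # per-field errors instead of writing invalid data:
    #
    #     res = db(db.person.id == 1).validate_and_update(name='Max')
    #     if res.errors:
    #         print res.errors
    #     else:
    #         print res.updated   # number of updated records
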
    def delete_uploaded_files(self, upload_fields=None):
        table = self.db[self.db._adapter.tables(self.query)[0]]
        # mind that uploadfield==True means the file is not stored in the DB
        if upload_fields:
            fields = upload_fields.keys()
        else:
            fields = table.fields
        fields = [f for f in fields if table[f].type == 'upload'
                  and table[f].uploadfield == True
                  and table[f].autodelete]
        if not fields:
            return False
        for record in self.select(*[table[f] for f in fields]):
            for fieldname in fields:
                field = table[fieldname]
                oldname = record.get(fieldname, None)
                if not oldname:
                    continue
                if upload_fields and oldname == upload_fields[fieldname]:
                    continue
                if field.custom_delete:
                    field.custom_delete(oldname)
                else:
                    uploadfolder = field.uploadfolder
                    if not uploadfolder:
                        uploadfolder = pjoin(
                            self.db._adapter.folder, '..', 'uploads')
                    if field.uploadseparate:
                        items = oldname.split('.')
                        uploadfolder = pjoin(
                            uploadfolder,
                            "%s.%s" % (items[0], items[1]),
                            items[2][:2])
                    oldpath = pjoin(uploadfolder, oldname)
                    if exists(oldpath):
                        os.unlink(oldpath)
        return False

class RecordUpdater(object):

    def __init__(self, colset, table, id):
        self.colset, self.db, self.tablename, self.id = \
            colset, table._db, table._tablename, id

    def __call__(self, **fields):
        colset, db, tablename, id = self.colset, self.db, self.tablename, self.id
        table = db[tablename]
        newfields = fields or dict(colset)
        for fieldname in newfields.keys():
            if not fieldname in table.fields or table[fieldname].type == 'id':
                del newfields[fieldname]
        table._db(table._id == id, ignore_common_filters=True).update(**newfields)
        colset.update(newfields)
        return colset

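# Illustrative sketch (not part of the original source): RecordUpdater is the
# callable behind row.update_record; it is not normally instantiated directly:
#
#     row = db.person(1)
#     row.update_record(name='Max')   # invokes a RecordUpdater bound to the row
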
class RecordDeleter(object):

    def __init__(self, table, id):
        self.db, self.tablename, self.id = table._db, table._tablename, id

    def __call__(self):
        return self.db(self.db[self.tablename]._id == self.id).delete()

class LazyReferenceGetter(object):

    def __init__(self, table, id):
        self.db, self.tablename, self.id = table._db, table._tablename, id

    def __call__(self, other_tablename):
        if self.db._lazy_tables is False:
            raise AttributeError()
        table = self.db[self.tablename]
        other_table = self.db[other_tablename]
        for rfield in table._referenced_by:
            if rfield.table == other_table:
                return LazySet(rfield, self.id)

        raise AttributeError()

class LazySet(object):

    def __init__(self, field, id):
        self.db, self.tablename, self.fieldname, self.id = \
            field.db, field._tablename, field.name, id

    def _getset(self):
        query = self.db[self.tablename][self.fieldname] == self.id
        return Set(self.db, query)

    def __repr__(self):
        return repr(self._getset())

    def __call__(self, query, ignore_common_filters=False):
        return self._getset()(query, ignore_common_filters)

    def _count(self, distinct=None):
        return self._getset()._count(distinct)

    def _select(self, *fields, **attributes):
        return self._getset()._select(*fields, **attributes)

    def _delete(self):
        return self._getset()._delete()

    def _update(self, **update_fields):
        return self._getset()._update(**update_fields)

    def isempty(self):
        return self._getset().isempty()

    def count(self, distinct=None, cache=None):
        return self._getset().count(distinct, cache)

    def select(self, *fields, **attributes):
        return self._getset().select(*fields, **attributes)

    def nested_select(self, *fields, **attributes):
        return self._getset().nested_select(*fields, **attributes)

    def delete(self):
        return self._getset().delete()

    def update(self, **update_fields):
        return self._getset().update(**update_fields)

    def update_naive(self, **update_fields):
        return self._getset().update_naive(**update_fields)

    def validate_and_update(self, **update_fields):
        return self._getset().validate_and_update(**update_fields)

    def delete_uploaded_files(self, upload_fields=None):
        return self._getset().delete_uploaded_files(upload_fields)

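# Illustrative sketch (not part of the original source): a LazySet delays
# building the underlying Set until it is actually used; with lazy tables
# enabled it backs the referencing-table attributes of a row, e.g.:
#
#     person = db.person(1)
#     dogs = person.dog.select()   # a set over db.dog.owner == person.id
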
class VirtualCommand(object):

    def __init__(self, method, row):
        self.method = method
        self.row = row

    def __call__(self, *args, **kwargs):
        return self.method(self.row, *args, **kwargs)

def lazy_virtualfield(f):
    f.__lazy__ = True
    return f

class Rows(object):

    """
    A wrapper for the return value of a select. It basically represents a
    table: it has an iterator, and each row is represented as a dictionary.
    """

    # TODO: this class still needs some work to care for ID/OID

    def __init__(
        self,
        db=None,
        records=[],
        colnames=[],
        compact=True,
        rawrows=None
        ):
        self.db = db
        self.records = records
        self.colnames = colnames
        self.compact = compact
        self.response = rawrows

    def __repr__(self):
        return '<Rows (%s)>' % len(self.records)

    def setvirtualfields(self, **keyed_virtualfields):
        """
        db.define_table('x', Field('number', 'integer'))
        if db(db.x).isempty(): [db.x.insert(number=i) for i in range(10)]

        from gluon.dal import lazy_virtualfield

        class MyVirtualFields(object):
            # normal virtual field (backward compatible, discouraged)
            def normal_shift(self): return self.x.number+1
            # lazy virtual field (because of @lazy_virtualfield)
            @lazy_virtualfield
            def lazy_shift(instance, row, delta=4): return row.x.number+delta
        db.x.virtualfields.append(MyVirtualFields())

        for row in db(db.x).select():
            print row.number, row.normal_shift, row.lazy_shift(delta=7)
        """
        if not keyed_virtualfields:
            return self
        for row in self.records:
            for (tablename, virtualfields) in keyed_virtualfields.iteritems():
                attributes = dir(virtualfields)
                if not tablename in row:
                    box = row[tablename] = Row()
                else:
                    box = row[tablename]
                updated = False
                for attribute in attributes:
                    if attribute[0] != '_':
                        method = getattr(virtualfields, attribute)
                        if hasattr(method, '__lazy__'):
                            box[attribute] = VirtualCommand(method, row)
                        elif type(method) == types.MethodType:
                            if not updated:
                                virtualfields.__dict__.update(row)
                                updated = True
                            box[attribute] = method()
        return self

    def __and__(self, other):
        if self.colnames != other.colnames:
            raise Exception('Cannot & incompatible Rows objects')
        records = self.records + other.records
        return Rows(self.db, records, self.colnames)

    def __or__(self, other):
        if self.colnames != other.colnames:
            raise Exception('Cannot | incompatible Rows objects')
        records = self.records
        records += [record for record in other.records
                    if not record in records]
        return Rows(self.db, records, self.colnames)

    def __nonzero__(self):
        if len(self.records):
            return 1
        return 0

    def __len__(self):
        return len(self.records)

    def __getslice__(self, a, b):
        return Rows(self.db, self.records[a:b], self.colnames, compact=self.compact)

    def __getitem__(self, i):
        row = self.records[i]
        keys = row.keys()
        if self.compact and len(keys) == 1 and keys[0] != '_extra':
            return row[row.keys()[0]]
        return row

    def __iter__(self):
        """
        Iterator over records
        """

        for i in xrange(len(self)):
            yield self[i]

    def __str__(self):
        """
        Serializes the rows as CSV
        """

        s = StringIO.StringIO()
        self.export_to_csv_file(s)
        return s.getvalue()

    def first(self):
        if not self.records:
            return None
        return self[0]

    def last(self):
        if not self.records:
            return None
        return self[-1]

    def find(self, f, limitby=None):
        """
        Returns a new Rows object, a subset of the original object,
        filtered by the function f
        """
        if not self:
            return Rows(self.db, [], self.colnames)
        records = []
        if limitby:
            a, b = limitby
        else:
            a, b = 0, len(self)
        k = 0
        for row in self:
            if f(row):
                if a <= k:
                    records.append(row)
                k += 1
                if k == b:
                    break
        return Rows(self.db, records, self.colnames)

    def exclude(self, f):
        """
        Removes elements from the calling Rows object, filtered by the
        function f, and returns a new Rows object containing the removed
        elements
        """
        if not self.records:
            return Rows(self.db, [], self.colnames)
        removed = []
        i = 0
        while i < len(self):
            row = self[i]
            if f(row):
                removed.append(self.records[i])
                del self.records[i]
            else:
                i += 1
        return Rows(self.db, removed, self.colnames)

    def sort(self, f, reverse=False):
        """
        Returns a new Rows object with the records sorted by f
        (the original object is not sorted in place)
        """
        rows = Rows(self.db, [], self.colnames, compact=False)
        rows.records = sorted(self, key=f, reverse=reverse)
        return rows

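    # Illustrative sketch (not part of the original source): find, exclude
    # and sort filter in Python, after the select. Assuming a 'person' table:
    #
    #     rows = db(db.person).select()
    #     js = rows.find(lambda r: r.name.startswith('J'))
    #     old = rows.exclude(lambda r: r.birth < '1970-01-01')  # also removed from rows
    #     by_name = rows.sort(lambda r: r.name, reverse=True)
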
    def group_by_value(self, *fields, **args):
        """
        Regroups the rows by the values of the given fields
        """
        one_result = False
        if 'one_result' in args:
            one_result = args['one_result']

        def build_fields_struct(row, fields, num, groups):
            """ helper function """
            if num > len(fields) - 1:
                if one_result:
                    return row
                else:
                    return [row]

            key = fields[num]
            value = row[key]

            if value not in groups:
                groups[value] = build_fields_struct(row, fields, num + 1, {})
            else:
                struct = build_fields_struct(row, fields, num + 1, groups[value])

                # still have more grouping to do
                if type(struct) == type(dict()):
                    groups[value].update()
                # no more grouping, first only is off
                elif type(struct) == type(list()):
                    groups[value] += struct
                # no more grouping, first only on
                else:
                    groups[value] = struct

            return groups

        if len(fields) == 0:
            return self

        # if select returned no results
        if not self.records:
            return {}

        grouped_row_group = dict()

        # build the struct
        for row in self:
            build_fields_struct(row, fields, 0, grouped_row_group)

        return grouped_row_group

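    # Illustrative sketch (not part of the original source): group_by_value
    # nests rows under the values of the given fields. Assuming a 'person'
    # table with a 'name' column:
    #
    #     rows = db(db.person).select()
    #     by_name = rows.group_by_value('name')
    #     # e.g. {'Max': [<Row ...>], 'Marco': [<Row ...>], ...}
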
    def render(self, i=None, fields=None):
        """
        Takes an index and returns a copy of the indexed row with values
        transformed via the "represent" attributes of the associated fields.

        If no index is specified, a generator is returned for iteration
        over all the rows.

        fields -- a list of fields to transform (if None, all fields with
        "represent" attributes will be transformed)
        """

        if i is None:
            return (self.render(i, fields=fields) for i in range(len(self)))
        import sqlhtml
        row = copy.deepcopy(self.records[i])
        keys = row.keys()
        tables = [f.tablename for f in fields] if fields \
            else [k for k in keys if k != '_extra']
        for table in tables:
            repr_fields = [f.name for f in fields if f.tablename == table] \
                if fields else [k for k in row[table].keys()
                                if (hasattr(self.db[table], k) and
                                    isinstance(self.db[table][k], Field)
                                    and self.db[table][k].represent)]
            for field in repr_fields:
                row[table][field] = sqlhtml.represent(
                    self.db[table][field], row[table][field], row[table])
        if self.compact and len(keys) == 1 and keys[0] != '_extra':
            return row[keys[0]]
        return row

    def as_list(self,
                compact=True,
                storage_to_dict=True,
                datetime_to_str=False,
                custom_types=None):
        """
        Returns the data as a list of rows.

        :param storage_to_dict: when True each row is converted to a dict
            (default True)
        :param datetime_to_str: convert datetime fields to strings
            (default False)
        """
        (oc, self.compact) = (self.compact, compact)
        if storage_to_dict:
            items = [item.as_dict(datetime_to_str, custom_types) for item in self]
        else:
            items = [item for item in self]
        self.compact = oc  # restore the original compact flag
        return items

    def as_dict(self,
                key='id',
                compact=True,
                storage_to_dict=True,
                datetime_to_str=False,
                custom_types=None):
        """
        Returns the data as a dictionary of dictionaries (storage_to_dict=True)
        or records (False)

        :param key: the name of the field to be used as dict key,
            normally the id
        :param compact: passed through to as_list (default True)
        :param storage_to_dict: when True returns a dict, otherwise a list
            (default True)
        :param datetime_to_str: convert datetime fields to strings
            (default False)
        """

        # test for multiple rows
        multi = False
        f = self.first()
        if f and isinstance(key, basestring):
            multi = any([isinstance(v, f.__class__) for v in f.values()])
            if (not "." in key) and multi:
                # No key provided, default to int indices
                def new_key():
                    i = 0
                    while True:
                        yield i
                        i += 1
                key_generator = new_key()
                key = lambda r: key_generator.next()

        rows = self.as_list(compact, storage_to_dict, datetime_to_str, custom_types)
        if isinstance(key, str) and key.count('.') == 1:
            (table, field) = key.split('.')
            return dict([(r[table][field], r) for r in rows])
        elif isinstance(key, str):
            return dict([(r[key], r) for r in rows])
        else:
            return dict([(key(r), r) for r in rows])

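    # Illustrative sketch (not part of the original source): keying rows by a
    # field. Assuming a 'person' table:
    #
    #     rows = db(db.person).select()
    #     by_id = rows.as_dict()              # {1: {...}, 2: {...}, ...}
    #     by_name = rows.as_dict(key='name')  # {'Max': {...}, ...}
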
    def export_to_csv_file(self, ofile, null='<NULL>', *args, **kwargs):
        """
        Exports data to csv; the first line contains the column names

        :param ofile: where the csv must be exported to
        :param null: how null values must be represented (default '<NULL>')
        :param delimiter: delimiter to separate values (default ',')
        :param quotechar: character to use to quote string values (default '"')
        :param quoting: quote system, use csv.QUOTE_*** (default csv.QUOTE_MINIMAL)
        :param represent: use the fields' .represent value (default False)
        :param colnames: list of column names to use (default self.colnames);
            this only works when exporting Rows objects, do NOT use it
            with db.export_to_csv()
        """
        delimiter = kwargs.get('delimiter', ',')
        quotechar = kwargs.get('quotechar', '"')
        quoting = kwargs.get('quoting', csv.QUOTE_MINIMAL)
        represent = kwargs.get('represent', False)
        writer = csv.writer(ofile, delimiter=delimiter,
                            quotechar=quotechar, quoting=quoting)
        colnames = kwargs.get('colnames', self.colnames)
        write_colnames = kwargs.get('write_colnames', True)
        # a proper csv starts with the column names
        if write_colnames:
            writer.writerow(colnames)

        def none_exception(value):
            """
            Returns a cleaned up value that can be used for csv export:
            - unicode text is encoded as such
            - None values are replaced with the given representation
              (default '<NULL>')
            """
            if value is None:
                return null
            elif isinstance(value, unicode):
                return value.encode('utf8')
            elif isinstance(value, Reference):
                return long(value)
            elif hasattr(value, 'isoformat'):
                return value.isoformat()[:19].replace('T', ' ')
            elif isinstance(value, (list, tuple)):  # for type='list:..'
                return bar_encode(value)
            return value

        for record in self:
            row = []
            for col in colnames:
                if not REGEX_TABLE_DOT_FIELD.match(col):
                    row.append(record._extra[col])
                else:
                    (t, f) = col.split('.')
                    field = self.db[t][f]
                    if isinstance(record.get(t, None), (Row, dict)):
                        value = record[t][f]
                    else:
                        value = record[f]
                    if field.type == 'blob' and not value is None:
                        value = base64.b64encode(value)
                    elif represent and field.represent:
                        value = field.represent(value)
                    row.append(none_exception(value))
            writer.writerow(row)

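    # Illustrative sketch (not part of the original source): exporting a
    # selection to a CSV file on disk. Assuming a 'person' table:
    #
    #     ofile = open('people.csv', 'wb')
    #     db(db.person).select().export_to_csv_file(ofile, represent=True)
    #     ofile.close()
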
    def xml(self, strict=False, row_name='row', rows_name='rows'):
        """
        Serializes the table using sqlhtml.SQLTABLE (if present);
        with strict=True, serializes the rows as plain XML instead
        """

        if strict:
            ncols = len(self.colnames)
            return '<%s>\n%s\n</%s>' % (rows_name,
                                        '\n'.join(row.as_xml(row_name=row_name,
                                                             colnames=self.colnames) for
                                                  row in self), rows_name)

        import sqlhtml
        return sqlhtml.SQLTABLE(self).xml()

    def as_xml(self, row_name='row', rows_name='rows'):
        return self.xml(strict=True, row_name=row_name, rows_name=rows_name)

    def as_json(self, mode='object', default=None):
        """
        Serializes the rows to a JSON list or object with objects;
        mode='object' is not implemented (it should return a nested
        object structure)
        """

        items = [record.as_json(mode=mode, default=default,
                                serialize=False,
                                colnames=self.colnames) for
                 record in self]

        if have_serializers:
            return serializers.json(items,
                                    default=default or
                                    serializers.custom_json)
        elif simplejson:
            return simplejson.dumps(items)
        else:
            raise RuntimeError("missing simplejson")

    # for consistent naming, yet backwards compatible
    as_csv = __str__
    json = as_json
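
    # Illustrative sketch (not part of the original source):
    #
    #     rows = db(db.person).select()
    #     print rows.as_json()   # e.g. '[{"id": 1, "name": "Max"}, ...]'
    #     print rows.as_csv()    # same output as str(rows)
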

################################################################################
# dummy function used to define some doctests
################################################################################

def test_all():
    """

    >>> if len(sys.argv)<2: db = DAL("sqlite://test.db")
    >>> if len(sys.argv)>1: db = DAL(sys.argv[1])
    >>> tmp = db.define_table('users',\
              Field('stringf', 'string', length=32, required=True),\
              Field('booleanf', 'boolean', default=False),\
              Field('passwordf', 'password', notnull=True),\
              Field('uploadf', 'upload'),\
              Field('blobf', 'blob'),\
              Field('integerf', 'integer', unique=True),\
              Field('doublef', 'double', unique=True, notnull=True),\
              Field('jsonf', 'json'),\
              Field('datef', 'date', default=datetime.date.today()),\
              Field('timef', 'time'),\
              Field('datetimef', 'datetime'),\
              migrate='test_user.table')

    Insert a record

    >>> db.users.insert(stringf='a', booleanf=True, passwordf='p', blobf='0A',\
                        uploadf=None, integerf=5, doublef=3.14,\
                        jsonf={"j": True},\
                        datef=datetime.date(2001, 1, 1),\
                        timef=datetime.time(12, 30, 15),\
                        datetimef=datetime.datetime(2002, 2, 2, 12, 30, 15))
    1

    Drop the table

    >>> db.users.drop()

    Examples of insert, select, update, delete

    >>> tmp = db.define_table('person',\
              Field('name'),\
              Field('birth','date'),\
              migrate='test_person.table')
    >>> person_id = db.person.insert(name='Marco',birth='2005-06-22')
    >>> person_id = db.person.insert(name='Massimo',birth='1971-12-21')

    commented len(db().select(db.person.ALL))
    commented 2

    >>> me = db(db.person.id==person_id).select()[0] # test select
    >>> me.name
    'Massimo'
    >>> db.person[2].name
    'Massimo'
    >>> db.person(2).name
    'Massimo'
    >>> db.person(name='Massimo').name
    'Massimo'
    >>> db.person(db.person.name=='Massimo').name
    'Massimo'
    >>> row = db.person[2]
    >>> row.name == row['name'] == row['person.name'] == row('person.name')
    True
    >>> db(db.person.name=='Massimo').update(name='massimo') # test update
    1
    >>> db(db.person.name=='Marco').select().first().delete_record() # test delete
    1

    Update a single record

    >>> me.update_record(name="Max")
    <Row {'name': 'Max', 'birth': datetime.date(1971, 12, 21), 'id': 2}>
    >>> me.name
    'Max'

    Examples of complex search conditions

    >>> len(db((db.person.name=='Max')&(db.person.birth<'2003-01-01')).select())
    1
    >>> len(db((db.person.name=='Max')&(db.person.birth<datetime.date(2003,01,01))).select())
    1
    >>> len(db((db.person.name=='Max')|(db.person.birth<'2003-01-01')).select())
    1
    >>> me = db(db.person.id==person_id).select(db.person.name)[0]
    >>> me.name
    'Max'

    Examples of search conditions using extract from date/datetime/time

    >>> len(db(db.person.birth.month()==12).select())
    1
    >>> len(db(db.person.birth.year()>1900).select())
    1

    Example of usage of NULL

    >>> len(db(db.person.birth==None).select()) ### test NULL
    0
    >>> len(db(db.person.birth!=None).select()) ### test NULL
    1

    Examples of search conditions using lower, upper, and like

    >>> len(db(db.person.name.upper()=='MAX').select())
    1
    >>> len(db(db.person.name.like('%ax')).select())
    1
    >>> len(db(db.person.name.upper().like('%AX')).select())
    1
    >>> len(db(~db.person.name.upper().like('%AX')).select())
    0

    orderby, groupby and limitby

    >>> people = db().select(db.person.name, orderby=db.person.name)
    >>> order = db.person.name|~db.person.birth
    >>> people = db().select(db.person.name, orderby=order)

    >>> people = db().select(db.person.name, orderby=db.person.name, groupby=db.person.name)

    >>> people = db().select(db.person.name, orderby=order, limitby=(0,100))

    Example of a one-to-many relation

    >>> tmp = db.define_table('dog',\
              Field('name'),\
              Field('birth','date'),\
              Field('owner',db.person),\
              migrate='test_dog.table')
    >>> db.dog.insert(name='Snoopy', birth=None, owner=person_id)
    1

    A simple JOIN

    >>> len(db(db.dog.owner==db.person.id).select())
    1

    >>> len(db().select(db.person.ALL, db.dog.name,left=db.dog.on(db.dog.owner==db.person.id)))
    1

    Drop tables

    >>> db.dog.drop()
    >>> db.person.drop()

    Example of a many-to-many relation and Set

    >>> tmp = db.define_table('author', Field('name'),\
              migrate='test_author.table')
    >>> tmp = db.define_table('paper', Field('title'),\
              migrate='test_paper.table')
    >>> tmp = db.define_table('authorship',\
              Field('author_id', db.author),\
              Field('paper_id', db.paper),\
              migrate='test_authorship.table')
    >>> aid = db.author.insert(name='Massimo')
    >>> pid = db.paper.insert(title='QCD')
    >>> tmp = db.authorship.insert(author_id=aid, paper_id=pid)

    Define a Set

    >>> authored_papers = db((db.author.id==db.authorship.author_id)&(db.paper.id==db.authorship.paper_id))
    >>> rows = authored_papers.select(db.author.name, db.paper.title)
    >>> for row in rows: print row.author.name, row.paper.title
    Massimo QCD

    Example of a search condition using belongs

    >>> set = (1, 2, 3)
    >>> rows = db(db.paper.id.belongs(set)).select(db.paper.ALL)
    >>> print rows[0].title
    QCD

    Example of a search condition using a nested select

    >>> nested_select = db()._select(db.authorship.paper_id)
    >>> rows = db(db.paper.id.belongs(nested_select)).select(db.paper.ALL)
    >>> print rows[0].title
    QCD

    Example of expressions

    >>> mynumber = db.define_table('mynumber', Field('x', 'integer'))
    >>> db(mynumber).delete()
    0
    >>> for i in range(10): tmp = mynumber.insert(x=i)
    >>> db(mynumber).select(mynumber.x.sum())[0](mynumber.x.sum())
    45

    >>> db(mynumber.x+2==5).select(mynumber.x + 2)[0](mynumber.x + 2)
    5

    Output in CSV

    >>> print str(authored_papers.select(db.author.name, db.paper.title)).strip()
    author.name,paper.title\r
    Massimo,QCD

    Delete all leftover tables

    >>> DAL.distributed_transaction_commit(db)

    >>> db.mynumber.drop()
    >>> db.authorship.drop()
    >>> db.author.drop()
    >>> db.paper.drop()
    """

################################################################################
# deprecated since the new DAL; here only for backward compatibility
################################################################################

SQLField = Field
SQLTable = Table
SQLXorable = Expression
SQLQuery = Query
SQLSet = Set
SQLRows = Rows
SQLStorage = Row
SQLDB = DAL
GQLDB = DAL
DAL.Field = Field  # was necessary in gluon/globals.py session.connect
DAL.Table = Table  # was necessary in gluon/globals.py session.connect

################################################################################
# Geodal utils
################################################################################

def geoPoint(x, y):
    return "POINT (%f %f)" % (x, y)

def geoLine(*line):
    return "LINESTRING (%s)" % ','.join("%f %f" % item for item in line)

def geoPolygon(*line):
    return "POLYGON ((%s))" % ','.join("%f %f" % item for item in line)
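
# Illustrative sketch (not part of the original source): the geo helpers build
# WKT (Well-Known Text) literals for spatial fields, e.g.:
#
#     geoPoint(1, 2)           # 'POINT (1.000000 2.000000)'
#     geoLine((0, 0), (1, 1))  # 'LINESTRING (0.000000 0.000000,1.000000 1.000000)'
#     geoPolygon((0, 0), (1, 0), (1, 1), (0, 0))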

################################################################################
# run tests
################################################################################

if __name__ == '__main__':
    import doctest
    doctest.testmod()