From c38e8f0f2d6b71c77620dc8a96a4e9401b114619 Mon Sep 17 00:00:00 2001 From: Eduardo Cereto Date: Sat, 22 Jan 2011 07:32:59 -0200 Subject: [PATCH 0001/1758] pt_BR .po file added --- .../locale/pt_BR/LC_MESSAGES/django.po | 369 ++++++++++++++++++ 1 file changed, 369 insertions(+) create mode 100644 debug_toolbar/locale/pt_BR/LC_MESSAGES/django.po diff --git a/debug_toolbar/locale/pt_BR/LC_MESSAGES/django.po b/debug_toolbar/locale/pt_BR/LC_MESSAGES/django.po new file mode 100644 index 000000000..355629b8b --- /dev/null +++ b/debug_toolbar/locale/pt_BR/LC_MESSAGES/django.po @@ -0,0 +1,369 @@ +# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER +# This file is distributed under the same license as the PACKAGE package. +# Percy Pérez-Pinedo, 2009. +# +# +msgid "" +msgstr "" +"Project-Id-Version: Django Debug Toolbar 0.8.4\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2011-01-22 08:58+0000\n" +"PO-Revision-Date: 2011-01-22 08:58+0000\n" +"Last-Translator: Eduardo Cereto Carvalho \n" +"Language-Team: \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: panels/cache.py:92 +#, python-format +msgid "Cache: %.2fms" +msgstr "" + +#: panels/cache.py:95 +msgid "Cache Usage" +msgstr "Uso do Cache" + +#: panels/headers.py:36 panels/headers.py:39 +msgid "HTTP Headers" +msgstr "Cabeçalhos HTTP" + +#: panels/logger.py:56 +msgid "Logging" +msgstr "" + +#: panels/logger.py:63 +msgid "Log Messages" +msgstr "Mensagens de Log" + +#: panels/request_vars.py:13 panels/request_vars.py:16 +msgid "Request Vars" +msgstr "Variáveis do Request" + +#: panels/settings_vars.py:16 +msgid "Settings" +msgstr "Configurações" + +#: panels/settings_vars.py:19 +#, python-format +msgid "Settings from %s" +msgstr "Configurações de %s" + +#: panels/signals.py:39 panels/signals.py:42 +msgid "Signals" +msgstr "Sinais" + +#: panels/sql.py:146 +msgid "SQL" +msgstr "SQL" + +#: panels/sql.py:160 +msgid "SQL Queries" +msgstr "Queries SQL" + +#: panels/template.py:47 +msgid "Templates" +msgstr "Templates" + +#: panels/template.py:52 +#, python-format +msgid "Templates (%(num_templates)s rendered)" +msgstr "Templates (%(num_templates)s renderizados)" + +#: panels/timer.py:35 templates/debug_toolbar/panels/cache.html:39 +#: templates/debug_toolbar/panels/logger.html:7 +#: templates/debug_toolbar/panels/sql.html:5 +#: templates/debug_toolbar/panels/sql_explain.html:11 +#: templates/debug_toolbar/panels/sql_profile.html:12 +#: templates/debug_toolbar/panels/sql_select.html:11 +msgid "Time" +msgstr "Tempo" + +#: panels/timer.py:47 +msgid "Resource Usage" +msgstr "Uso de Recursos" + +#: panels/timer.py:78 +msgid "User CPU time" +msgstr "Tempo de CPU do usuário" + +#: panels/timer.py:79 +msgid "System CPU time" +msgstr "Tempo de CPU do sistema" + +#: panels/timer.py:80 +msgid "Total CPU time" +msgstr "Tempo de CPU total" + +#: panels/timer.py:81 +msgid "Elapsed time" +msgstr "Tempo decorrido" + +#: panels/timer.py:82 +msgid "Context switches" +msgstr "Alterações de contexto" + +#: panels/version.py:20 panels/version.py:29 +msgid "Versions" +msgstr "Versões" + +#: templates/debug_toolbar/base.html:23 +msgid "Hide Toolbar" +msgstr "Esconder Barra" + +#: templates/debug_toolbar/base.html:23 +msgid "Hide" +msgstr "Esconder" + +#: templates/debug_toolbar/base.html:48 +msgid "Show Toolbar" +msgstr "Mostrar Barra" + +#: templates/debug_toolbar/base.html:54 +msgid "Close" +msgstr "Fechar" + +#: templates/debug_toolbar/redirect.html:7 +#: templates/debug_toolbar/panels/logger.html:9 +msgid 
"Location" +msgstr "Localização" + +#: templates/debug_toolbar/redirect.html:9 +msgid "" +"The Django Debug Toolbar has intercepted a redirect to the above URL for " +"debug viewing purposes. You can click the above link to continue with the " +"redirect as normal. If you'd like to disable this feature, set the " +"DEBUG_TOOLBAR_CONFIG dictionary's key " +"INTERCEPT_REDIRECTS to False." +msgstr "" +"A Barra de Debug do Django interceptou um redirecionamento para a URL acima +por " +"propósito de debug. Você pode clicar no link acima para continuar com o " +"redirecionamento normal. Se você gostaria de desabilitar essa funcionalidade, +configure a " +"chave do dicionário " +"DEBUG_TOOLBAR_CONFIG para False" + +#: templates/debug_toolbar/panels/cache.html:14 +msgid "Total Calls" +msgstr "Total de Chamadas" + +#: templates/debug_toolbar/panels/cache.html:16 +msgid "Total Time" +msgstr "Tempo Total" + +#: templates/debug_toolbar/panels/cache.html:18 +msgid "Hits" +msgstr "" + +#: templates/debug_toolbar/panels/cache.html:20 +msgid "Misses" +msgstr "" + +#: templates/debug_toolbar/panels/cache.html:35 +msgid "Breakdown" +msgstr "" + +#: templates/debug_toolbar/panels/cache.html:40 +msgid "Type" +msgstr "Tipo" + +#: templates/debug_toolbar/panels/cache.html:41 +msgid "Parameters" +msgstr "Parâmetros" + +#: templates/debug_toolbar/panels/cache.html:42 +msgid "Function" +msgstr "Função" + +#: templates/debug_toolbar/panels/headers.html:5 +msgid "Key" +msgstr "Chave" + +#: templates/debug_toolbar/panels/headers.html:6 +#: templates/debug_toolbar/panels/request_vars.html:37 +#: templates/debug_toolbar/panels/request_vars.html:63 +#: templates/debug_toolbar/panels/request_vars.html:85 +#: templates/debug_toolbar/panels/request_vars.html:107 +#: templates/debug_toolbar/panels/settings_vars.html:6 +#: templates/debug_toolbar/panels/timer.html:10 +msgid "Value" +msgstr "Valor" + +#: templates/debug_toolbar/panels/logger.html:6 +msgid "Level" +msgstr "Nível" + +#: templates/debug_toolbar/panels/logger.html:8 +msgid "Message" +msgstr "Mensagem" + +#: templates/debug_toolbar/panels/logger.html:24 +msgid "No messages logged" +msgstr "Nenhuma mensagem logada" + +#: templates/debug_toolbar/panels/request_vars.html:3 +msgid "View information" +msgstr "Ver informações" + +#: templates/debug_toolbar/panels/request_vars.html:7 +msgid "View Function" +msgstr "Ver Função" + +#: templates/debug_toolbar/panels/request_vars.html:8 +msgid "args" +msgstr "" + +#: templates/debug_toolbar/panels/request_vars.html:9 +msgid "kwargs" +msgstr "" + +#: templates/debug_toolbar/panels/request_vars.html:27 +msgid "COOKIES Variables" +msgstr "Variáveis do COOKIE" + +#: templates/debug_toolbar/panels/request_vars.html:36 +#: templates/debug_toolbar/panels/request_vars.html:62 +#: templates/debug_toolbar/panels/request_vars.html:84 +#: templates/debug_toolbar/panels/request_vars.html:106 +msgid "Variable" +msgstr "Variável" + +#: templates/debug_toolbar/panels/request_vars.html:50 +msgid "No COOKIE data" +msgstr "Não há dados de COOKIE" + +#: templates/debug_toolbar/panels/request_vars.html:53 +msgid "SESSION Variables" +msgstr "Variáveis de SESSION" + +#: templates/debug_toolbar/panels/request_vars.html:76 +msgid "No SESSION data" +msgstr "Não há dados de SESSION" + +#: templates/debug_toolbar/panels/request_vars.html:79 +msgid "GET Variables" +msgstr "Variáveis de GET" + +#: templates/debug_toolbar/panels/request_vars.html:98 +msgid "No GET data" +msgstr "Não há dados de GET" + +#: 
templates/debug_toolbar/panels/request_vars.html:101 +msgid "POST Variables" +msgstr "Variáveis de POST" + +#: templates/debug_toolbar/panels/request_vars.html:120 +msgid "No POST data" +msgstr "Não há dados de POST" + +#: templates/debug_toolbar/panels/settings_vars.html:5 +msgid "Setting" +msgstr "Configuração" + +#: templates/debug_toolbar/panels/signals.html:5 +msgid "Signal" +msgstr "Sinais" + +#: templates/debug_toolbar/panels/signals.html:6 +msgid "Providing Args" +msgstr "Argumentos Fornecidos" + +#: templates/debug_toolbar/panels/signals.html:7 +msgid "Receivers" +msgstr "" + +#: templates/debug_toolbar/panels/sql.html:6 +msgid "Action" +msgstr "Ação" + +#: templates/debug_toolbar/panels/sql.html:7 +msgid "Stacktrace" +msgstr "" + +#: templates/debug_toolbar/panels/sql.html:8 +msgid "Query" +msgstr "" + +#: templates/debug_toolbar/panels/sql.html:38 +msgid "Line" +msgstr "Linha" + +#: templates/debug_toolbar/panels/sql.html:39 +msgid "Method" +msgstr "Método" + +#: templates/debug_toolbar/panels/sql.html:40 +msgid "File" +msgstr "Arquivo" + +#: templates/debug_toolbar/panels/sql_explain.html:3 +#: templates/debug_toolbar/panels/sql_profile.html:3 +#: templates/debug_toolbar/panels/sql_select.html:3 +#: templates/debug_toolbar/panels/template_source.html:3 +msgid "Back" +msgstr "Voltar" + +#: templates/debug_toolbar/panels/sql_explain.html:4 +msgid "SQL Explained" +msgstr "SQL Explicada" + +#: templates/debug_toolbar/panels/sql_explain.html:9 +#: templates/debug_toolbar/panels/sql_profile.html:10 +#: templates/debug_toolbar/panels/sql_select.html:9 +msgid "Executed SQL" +msgstr "SQL Executada" + +#: templates/debug_toolbar/panels/sql_profile.html:4 +msgid "SQL Profiled" +msgstr "Perfil da SQL" + +#: templates/debug_toolbar/panels/sql_profile.html:35 +msgid "Error" +msgstr "Erro" + +#: templates/debug_toolbar/panels/sql_select.html:4 +msgid "SQL Selected" +msgstr "SQL Selecionada" + +#: templates/debug_toolbar/panels/sql_select.html:34 +msgid "Empty set" +msgstr "Conjunto vazio" + +#: templates/debug_toolbar/panels/template_source.html:4 +msgid "Template Source" +msgstr "Origem do Template" + +#: templates/debug_toolbar/panels/templates.html:2 +msgid "Template path" +msgstr "Caminho do Template" + +#: templates/debug_toolbar/panels/templates.html:13 +msgid "Template" +msgstr "Template" + +#: templates/debug_toolbar/panels/templates.html:21 +#: templates/debug_toolbar/panels/templates.html:37 +msgid "Toggle Context" +msgstr "Trocar Contexto" + +#: templates/debug_toolbar/panels/templates.html:28 +#: templates/debug_toolbar/panels/templates.html:43 +msgid "None" +msgstr "Nenhum" + +#: templates/debug_toolbar/panels/templates.html:31 +msgid "Context processor" +msgstr "Processador do Contexto" + +#: templates/debug_toolbar/panels/timer.html:9 +msgid "Resource" +msgstr "Recurso" + +#: templates/debug_toolbar/panels/versions.html:6 +msgid "Package" +msgstr "Pacote" + +#: templates/debug_toolbar/panels/versions.html:7 +msgid "Version" +msgstr "Versão" From 3df1a11bf5c3bb2bc7c4e1055d496ceeb15c2472 Mon Sep 17 00:00:00 2001 From: eduardocereto Date: Sat, 22 Jan 2011 07:58:17 -0200 Subject: [PATCH 0002/1758] Compiled pt_BR po file --- debug_toolbar/locale/pt_BR/LC_MESSAGES/django.mo | Bin 0 -> 4314 bytes debug_toolbar/locale/pt_BR/LC_MESSAGES/django.po | 6 ++---- 2 files changed, 2 insertions(+), 4 deletions(-) create mode 100644 debug_toolbar/locale/pt_BR/LC_MESSAGES/django.mo diff --git a/debug_toolbar/locale/pt_BR/LC_MESSAGES/django.mo b/debug_toolbar/locale/pt_BR/LC_MESSAGES/django.mo new 
file mode 100644 index 0000000000000000000000000000000000000000..b12a2689991eb4f3e0f6bb157a5cfe8ba64c87b8 GIT binary patch literal 4314 zcma);O^h5z6~`+ffLUw;4j}{rDLx$AIJ4e0F;{9`GIDKJcyJ!{8gi zBffnad^_rOa4#5uZvhp^ahH7iWsvJV4ekLy2kryE?EA0z{u{pjJ@5_a{~5@2e(C#% zAm_gXa=+h$Zw7CI_=#8WdK>spkn+9?a@-yax);13+z%cADbEbZ@uxuiL<_G8F!1dg zAm^tb_kGg0e;%Y>z5-It?|?kdb0Ek61mym|^7WTN{KQ}I;yQl=-v|B+31hIs81f(2Cef@Ee>#f!zNzNO_td_1y-!zw+%4W#3w<@^C`~_kaBK;Jm1qG z<@^G8A9xLf>BSGgkAOb~FTj3(1*x~=P!9Jw0dn4Xkox`%NV)nT?ei2!eSH?>ylWuW zeHP?-p9i`Biy%}eehzY-mq2VM8Yq-=9);t12e>cyqs*Yt z{(PoTpw`kJymOyGX`)P`@OeMVhfwIxv_bjIg}Je*m{;K?QE#`;FE%=IT{)vRW1We* z3VT|vWvZ)1J+>L%HqG_bT=ty}b(UE-Zpp5g9HTSQh*dw+k<3k^MI-6w+cML+XgG(_ zjjK8=a`bjCv_#XyS~QEalHqJ4*tPY!AXt&-w2HLL#5oh`@+AXn<4xtndA%)KdZ1&` zGN~3V8>;dkE!&k#5DXG$!he8y&qiX|%Ib(RQn_N4TaLb0+MS(Br_t#wwwK49iClzYY5fr(7om2<9}iB)~M&{@t?vXa@tg?jwq^*q{o zXJM)eMK-SXV};X3Wesa@L4Q@{LZO|(i8-vJV;w@pqNDR1dyk0Tsv(nG&L;9?Xd`|4 zKz8zI`5`(zdu6QOj_IZ<78NjaJIi$<>nm%OSp%IwmRyU`hwM~`8Q1$N?~Qx2oiBOl zz@fBAHn3c|LUuTlPNy)ljt+|e?zV7IrClo*^k&f=$s>CzlLoE_h4tw^3X>w^a8Q|B znPZyTv;+9+YD;ceC!=!q!00Q~e!p;icyBf-<)>{SLzT)fHsPgR6JwL2YGnuqH))|I z+__iwk2it{sdb5pvCQI@+_uHRNXF)p=Df&cyfc_#pkJ->!s$op_~javt`{2fYiBot zcDpq{x4KbpFEakWtoI-?obv|6^Q z;!OM2NfGec1#nXc<8Ldfxr%pLud1pzhGEL@YSMOJi?vLfAqF9GhGhBFP zVjT*W;~QMoObY!c)op~2UWAEii&4!_nSE{ZPEF2CJ~pwc`_|>PrL1eB+I-Q?YJr`V z6D#eYR(DV-wyrI}>t^M#sp;w3)O79GF*!9edwk}|6dnw$t?GfHh_5@YrLw#hIF)Ab zVe4jPBPx`O5FX%#P-|Vefr@+3udX`6vi(dqQ6`=YZE|{|1s^SN+iQVV31nPaTx#r` zae8uUqSSqgeW2A@$)I_(yuK%7*u(wT`Kh&_S)17zM|rlit2I*GM|kb5oY^$_gqR!N z7=Fu&bsOghe6+>z`ar|WC~z6FVKg`(xkMBw!_)HdhYC?Xm_?Upua}5kH}(Kr@bOkGN~jlwc7gkr9U9L`rZrgU-4I7RVuB)4EiLP&Mw+F2z~t^8_+OSh&nq=GAB}tGABU zawOjFG+{ytwxS(U1k@qf=W|5)J;oxpLR*~4P4 z5{cVaDFrL~6f;gJ$O%O<$|zJxX(8E1CP$`WPEnzO{RDEBUG_TOOLBAR_CONFIG dictionary's key " "INTERCEPT_REDIRECTS to False." msgstr "" -"A Barra de Debug do Django interceptou um redirecionamento para a URL acima -por " +"A Barra de Debug do Django interceptou um redirecionamento para a URL acima por " "propósito de debug. Você pode clicar no link acima para continuar com o " -"redirecionamento normal. Se você gostaria de desabilitar essa funcionalidade, -configure a " +"redirecionamento normal. 
Se você gostaria de desabilitar essa funcionalidade, configure a " "chave do dicionário " "DEBUG_TOOLBAR_CONFIG para False" From 34cecffbacf85ff21e9c8b0d01e1dcc644559859 Mon Sep 17 00:00:00 2001 From: eduardocereto Date: Sat, 22 Jan 2011 08:07:19 -0200 Subject: [PATCH 0003/1758] pt_BR updated --- .../locale/pt_BR/LC_MESSAGES/django.mo | Bin 4314 -> 4320 bytes .../locale/pt_BR/LC_MESSAGES/django.po | 6 +++--- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/debug_toolbar/locale/pt_BR/LC_MESSAGES/django.mo b/debug_toolbar/locale/pt_BR/LC_MESSAGES/django.mo index b12a2689991eb4f3e0f6bb157a5cfe8ba64c87b8..8c0c45f0725ce90fb5d10b69953cb5a3f4c37289 100644 GIT binary patch delta 153 zcmcbm_&{-k3`?v40|SFKkPrgWK0sO!NXG+dZXjIYHC&QZ(6Kphm^5H1F{8!;AA3!ZK6K W89ejS@{4j4fg)Q|ixpB5A))}l@E^+n delta 147 zcmaE$cuR4E3`;CO0|SG#00V;%koE@Bf Date: Sat, 2 Apr 2011 23:01:09 -0300 Subject: [PATCH 0004/1758] Updated pt/BR strings --- .../locale/pt_BR/LC_MESSAGES/django.mo | Bin 4320 -> 4694 bytes .../locale/pt_BR/LC_MESSAGES/django.po | 22 +++++++++--------- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/debug_toolbar/locale/pt_BR/LC_MESSAGES/django.mo b/debug_toolbar/locale/pt_BR/LC_MESSAGES/django.mo index 8c0c45f0725ce90fb5d10b69953cb5a3f4c37289..0fa0a134fee228717610126bb6ca6f1fd4d66ffd 100644 GIT binary patch delta 2048 zcmY+EZD`v?9LImHZTgTjYo}AIt+s2IX|1ct~LGOU6df&=YvqBy6ZxCebPg}zW3!f=A0Z+hB!-1k0r_rJT} z!(DQr_1pHsxuq4a8f64Eq5b7%Q|edJQ04+=R69je0l$JZ@GNYA=V1{34(s6GP+Nml z3*Q@HJ>xbQg56O2dm-xzHsn5xLKzr`)i42d;I#X`$9;d^jbC)*H{f-wdlSm=+iw0H zsB?}$8J>kyJL&j23}D~RlsabL!!QdjLPhi&)W)lD39R9?rLYmo(N3uKt07CQ2iC$t zH$MjD&;(S(lThd7Ak_*qVeH!rbd<{1p&~g975NdU4Ie={H0Q?OKpDIUsrD;PGyDT; zeLdE=1#5;{*AC@aCsZKoq4sTo1x<|5k-;Y*)iN{@_zaAenH_+N_$N+L#FrfZgmSEs zMg|(7)-^$$*9w(^E+~WRpd8xd#+!rWUkhVQ=#e}QbwCCx0w4C{z#cdLnD8|JDU_pM zz#HIqkZSWZI`0b9x+(@Y!7#iTMxX+I2r7_R9r>5RaVDsippl0=p*-FVbv1jT9DK#i zAA&mIDAfKFP?3HDWq8hwzl2mfOQZciL#@9Ib^etC9T_a=G;OSf^0WaigB|cj*aI8k zV^9(8gj&B3>QTPz#s?wEvUj2O&q6tN+RcCM=FdUdEnKFflw5TmLJaf>mqUEBh+{X@ z!BI%H9W-pQaW|fU%NailweKw`2ah_w5A{ZlLj~{|Bm)IIPe&2_0HNFtuh8#bm{a((he`XJT7RJ$}!Q8=&5R^(OM^Y{sJT#FRyIUVJK<-IcMvSZrwP zz*x(p@mxASG41)*S8NG{Lh;0;x3Oi#sx?o}_{EOEOBI9ZykGn<5NY0?O{LPAlnr~n zA5VEOKbcM1aN74hzc?S*8yu#@m$Kc zsa?Xc@!ChqYW{Pm_+GHBuJ3==&jdeMu)Z&}wh8M#CNnZUW5Xv8=d;P|zs668j@K_5 mpYW2|oL79V?n3R@qJ1y?V_9|AfS<@_l3ua5{y^v&XY3!UfZt94 delta 1641 zcmY+^OGs2v9LMqhSUFS9jHbO`P7iz8OM7UU<|AE%Q%Ek_4TDW3l0)XIhXWQAQ4m}r zC=!Amh|;0~E3!=wj3_9I3aUYfY*TH5XwjnYZ#*qJ-2eTYbMCq4{?9p%Z+emA>wRYDQ44#`LoMvR z^DCy5(Jv}mVGcJ945210L~V5hnZuT%5?Slo6{vBws59S*+xcJ&mH0I#mjgHrZ=-f% z#Pz?)WB+H;@qvz6_!G6lAfuG&JXAuZs1L0}B~XJ(s0p>@yHE+lU4Iwq{iCP_oIxdW z-nB2H=DQMP|MlVk9hz_$^?@g-gkIou9K#v-2WMjuZ!7WDsEHdj}M}UxHSskE+xVwW_$X3i)4U8(q5&w-C!+Tiaen zXq8J`pBDJ9tfn&8D$KUJo`_TDtHM!di;iUlv1z<%C0OCLOe#vvp{6x%C$u(|DnbXc znb1LKkCqWC8;G^p+V~H&ntD0WK&*HD+RKGRscZ97`mbnjIT|ZAi&`ZWZKldPg00LR zf_`kTf3MdSxSy_^yeTJrByYFRdly{f`9nkA!%%raG|{QS2QB)jzdN}%=@o>By-YZh N9?Adc^9HB({RJ7TjT!&| diff --git a/debug_toolbar/locale/pt_BR/LC_MESSAGES/django.po b/debug_toolbar/locale/pt_BR/LC_MESSAGES/django.po index d80930c14..80a1bd26a 100644 --- a/debug_toolbar/locale/pt_BR/LC_MESSAGES/django.po +++ b/debug_toolbar/locale/pt_BR/LC_MESSAGES/django.po @@ -18,7 +18,7 @@ msgstr "" #: panels/cache.py:92 #, python-format msgid "Cache: %.2fms" -msgstr "" +msgstr "Cache: %.2fms" #: panels/cache.py:95 msgid "Cache Usage" @@ -30,7 +30,7 @@ msgstr "Cabeçalhos HTTP" #: panels/logger.py:56 msgid "Logging" -msgstr "" +msgstr "Logs" #: panels/logger.py:63 msgid "Log Messages" @@ -152,15 
+152,15 @@ msgstr "Tempo Total" #: templates/debug_toolbar/panels/cache.html:18 msgid "Hits" -msgstr "" +msgstr "Hits" #: templates/debug_toolbar/panels/cache.html:20 msgid "Misses" -msgstr "" +msgstr "Misses" #: templates/debug_toolbar/panels/cache.html:35 msgid "Breakdown" -msgstr "" +msgstr "Breakdown" #: templates/debug_toolbar/panels/cache.html:40 msgid "Type" @@ -210,11 +210,11 @@ msgstr "Função View" #: templates/debug_toolbar/panels/request_vars.html:8 msgid "args" -msgstr "" +msgstr "args" #: templates/debug_toolbar/panels/request_vars.html:9 msgid "kwargs" -msgstr "" +msgstr "kwargs" #: templates/debug_toolbar/panels/request_vars.html:27 msgid "COOKIES Variables" @@ -269,7 +269,7 @@ msgstr "Argumentos Fornecidos" #: templates/debug_toolbar/panels/signals.html:7 msgid "Receivers" -msgstr "" +msgstr "Recebedores" #: templates/debug_toolbar/panels/sql.html:6 msgid "Action" @@ -277,11 +277,11 @@ msgstr "Ação" #: templates/debug_toolbar/panels/sql.html:7 msgid "Stacktrace" -msgstr "" +msgstr "Stacktrace" #: templates/debug_toolbar/panels/sql.html:8 msgid "Query" -msgstr "" +msgstr "Query" #: templates/debug_toolbar/panels/sql.html:38 msgid "Line" @@ -343,7 +343,7 @@ msgstr "Template" #: templates/debug_toolbar/panels/templates.html:21 #: templates/debug_toolbar/panels/templates.html:37 msgid "Toggle Context" -msgstr "Mostrar Contexto" +msgstr "Mostrar/Esconder Contexto" #: templates/debug_toolbar/panels/templates.html:28 #: templates/debug_toolbar/panels/templates.html:43 From d0c424c0c6a26ad9ea190319ad5d1e5872d63f6c Mon Sep 17 00:00:00 2001 From: Danilo Bargen Date: Thu, 19 May 2011 19:15:37 +0200 Subject: [PATCH 0005/1758] Display execution time after query --- debug_toolbar/management/commands/debugsqlshell.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/debug_toolbar/management/commands/debugsqlshell.py b/debug_toolbar/management/commands/debugsqlshell.py index eaeafd497..e32340153 100644 --- a/debug_toolbar/management/commands/debugsqlshell.py +++ b/debug_toolbar/management/commands/debugsqlshell.py @@ -1,5 +1,6 @@ import os from optparse import make_option +from datetime import datetime from django.core.management.base import NoArgsCommand from django.db.backends import util @@ -8,12 +9,16 @@ class PrintQueryWrapper(util.CursorDebugWrapper): def execute(self, sql, params=()): + starttime = datetime.today() try: return self.cursor.execute(sql, params) finally: raw_sql = self.db.ops.last_executed_query(self.cursor, sql, params) + execution_time = datetime.today() - starttime print sqlparse.format(raw_sql, reindent=True) print + print 'Execution time: %fs' % execution_time.total_seconds() + print util.CursorDebugWrapper = PrintQueryWrapper From 729d75d59fbc313f2e421a953bbf16961bba0744 Mon Sep 17 00:00:00 2001 From: Seth Mason Date: Mon, 8 Aug 2011 16:58:31 -0700 Subject: [PATCH 0006/1758] Fixes issue #92 -- ipython API changed so debugsqlshell stopped working. Instead of copying and pasting, just import django.core.management.commands.shell.Command. 
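In sketch form, the module this patch leaves behind looks roughly like the following (names taken from the diff below; `sqlparse` is the copy vendored under debug_toolbar.utils). The cursor wrapper is monkey-patched to pretty-print every executed query, and Django's own shell `Command` is imported so that `manage.py debugsqlshell` reuses it instead of carrying a drifting copy:

# Sketch only -- the real file is shown in the diff below.
from django.db.backends import util
from django.core.management.commands.shell import Command  # re-exported: this *is* the command

from debug_toolbar.utils import sqlparse


class PrintQueryWrapper(util.CursorDebugWrapper):
    def execute(self, sql, params=()):
        try:
            return self.cursor.execute(sql, params)
        finally:
            # Reconstruct and pretty-print the SQL that actually ran,
            # even if the execute() call raised.
            raw_sql = self.db.ops.last_executed_query(self.cursor, sql, params)
            print(sqlparse.format(raw_sql, reindent=True))

# Every cursor Django hands out now goes through the printing wrapper.
util.CursorDebugWrapper = PrintQueryWrapper

Because Django's command loader only looks for a class named `Command` in the module, the bare import is enough; the IPython-version-specific startup code that broke with issue #92 no longer lives in this repository at all.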
--- .../management/commands/debugsqlshell.py | 60 +------------------ 1 file changed, 1 insertion(+), 59 deletions(-) diff --git a/debug_toolbar/management/commands/debugsqlshell.py b/debug_toolbar/management/commands/debugsqlshell.py index eaeafd497..71723fa6e 100644 --- a/debug_toolbar/management/commands/debugsqlshell.py +++ b/debug_toolbar/management/commands/debugsqlshell.py @@ -1,8 +1,8 @@ import os from optparse import make_option -from django.core.management.base import NoArgsCommand from django.db.backends import util +from django.core.management.commands.shell import Command from debug_toolbar.utils import sqlparse @@ -16,61 +16,3 @@ def execute(self, sql, params=()): print util.CursorDebugWrapper = PrintQueryWrapper - -# The rest is copy/paste from django/core/management/commands/shell.py - -class Command(NoArgsCommand): - option_list = NoArgsCommand.option_list + ( - make_option('--plain', action='http://github.com/store_true', dest='plain', - help='Tells Django to use plain Python, not IPython.'), - ) - help = "Runs a Python interactive interpreter. Tries to use IPython, if it's available." - - requires_model_validation = False - - def handle_noargs(self, **options): - # XXX: (Temporary) workaround for ticket #1796: force early loading of all - # models from installed apps. - from django.db.models.loading import get_models - loaded_models = get_models() - - use_plain = options.get('plain', False) - - try: - if use_plain: - # Don't bother loading IPython, because the user wants plain Python. - raise ImportError - import IPython - # Explicitly pass an empty list as arguments, because otherwise IPython - # would use sys.argv from this script. - shell = IPython.Shell.IPShell(argv=[]) - shell.mainloop() - except ImportError: - import code - # Set up a dictionary to serve as the environment for the shell, so - # that tab completion works on objects that are imported at runtime. - # See ticket 5082. - imported_objects = {} - try: # Try activating rlcompleter, because it's handy. - import readline - except ImportError: - pass - else: - # We don't have to wrap the following import in a 'try', because - # we already know 'readline' was imported successfully. - import rlcompleter - readline.set_completer(rlcompleter.Completer(imported_objects).complete) - readline.parse_and_bind("tab:complete") - - # We want to honor both $PYTHONSTARTUP and .pythonrc.py, so follow system - # conventions and get $PYTHONSTARTUP first then import user. - if not use_plain: - pythonrc = os.environ.get("PYTHONSTARTUP") - if pythonrc and os.path.isfile(pythonrc): - try: - execfile(pythonrc) - except NameError: - pass - # This will import .pythonrc.py as a side-effect - import user - code.interact(local=imported_objects) From f6b17513b83c9a1ca49b147c6bc049767a14b034 Mon Sep 17 00:00:00 2001 From: Jason Keene Date: Tue, 9 Aug 2011 10:49:03 -0400 Subject: [PATCH 0007/1758] Fixed logic w/ settings.INTERNAL_IPS check. 
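Before this change `_show_toolbar()` effectively returned True when the address was internal *or* DEBUG was on; the fix requires both. A small self-contained sketch of the corrected predicate (a hypothetical free function for illustration, not the middleware method itself):

# Sketch of the corrected check -- both conditions must hold.
def show_toolbar(remote_addr, internal_ips, debug):
    return remote_addr in internal_ips and debug

assert show_toolbar('127.0.0.1', ['127.0.0.1'], True)       # internal IP + DEBUG
assert not show_toolbar('10.0.0.1', ['127.0.0.1'], True)    # DEBUG alone is no longer enough
assert not show_toolbar('127.0.0.1', ['127.0.0.1'], False)  # an internal IP alone isn't either

Under the old `or` logic the last two cases both returned True, which is why the test updates later in this series pin INTERNAL_IPS and DEBUG together in every scenario.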
--- debug_toolbar/middleware.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/debug_toolbar/middleware.py b/debug_toolbar/middleware.py index 05d8e8093..6eb5d6135 100644 --- a/debug_toolbar/middleware.py +++ b/debug_toolbar/middleware.py @@ -69,10 +69,7 @@ def _show_toolbar(self, request): remote_addr = request.META.get('REMOTE_ADDR', None) # if not internal ip, and not DEBUG - if not (remote_addr in settings.INTERNAL_IPS or settings.DEBUG): - return False - - return True + return remote_addr in settings.INTERNAL_IPS and settings.DEBUG def process_request(self, request): __traceback_hide__ = True From b4ac65ced2ebcaebf8f326e2651044bd3cdf765c Mon Sep 17 00:00:00 2001 From: Jason Keene Date: Tue, 9 Aug 2011 10:50:54 -0400 Subject: [PATCH 0008/1758] Force bool on settings.DEBUG as it could be set to a non bool value. --- debug_toolbar/middleware.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/debug_toolbar/middleware.py b/debug_toolbar/middleware.py index 6eb5d6135..0c6821f92 100644 --- a/debug_toolbar/middleware.py +++ b/debug_toolbar/middleware.py @@ -69,7 +69,7 @@ def _show_toolbar(self, request): remote_addr = request.META.get('REMOTE_ADDR', None) # if not internal ip, and not DEBUG - return remote_addr in settings.INTERNAL_IPS and settings.DEBUG + return remote_addr in settings.INTERNAL_IPS and bool(settings.DEBUG) def process_request(self, request): __traceback_hide__ = True From 83415bcb8d8f391b31bc320c73988aef9c106a36 Mon Sep 17 00:00:00 2001 From: Jason Keene Date: Tue, 9 Aug 2011 11:50:07 -0400 Subject: [PATCH 0009/1758] Fixed tests that check INTERNAL_IPS setting. --- tests/tests.py | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/tests/tests.py b/tests/tests.py index 80fda7d6a..6f7de0d81 100644 --- a/tests/tests.py +++ b/tests/tests.py @@ -55,23 +55,25 @@ def test_middleware(self): def test_show_toolbar_DEBUG(self): request = self.request + request.META = {'REMOTE_ADDR': '127.0.0.1'} middleware = DebugToolbarMiddleware() - with Settings(DEBUG=True): + with Settings(INTERNAL_IPS=['127.0.0.1'], DEBUG=True): self.assertTrue(middleware._show_toolbar(request)) - with Settings(DEBUG=False): + with Settings(INTERNAL_IPS=['127.0.0.1'], DEBUG=False): self.assertFalse(middleware._show_toolbar(request)) def test_show_toolbar_TEST(self): request = self.request + request.META = {'REMOTE_ADDR': '127.0.0.1'} middleware = DebugToolbarMiddleware() - with Settings(TEST=True, DEBUG=True): + with Settings(INTERNAL_IPS=['127.0.0.1'], TEST=True, DEBUG=True): self.assertFalse(middleware._show_toolbar(request)) - with Settings(TEST=False, DEBUG=True): + with Settings(INTERNAL_IPS=['127.0.0.1'], TEST=False, DEBUG=True): self.assertTrue(middleware._show_toolbar(request)) def test_show_toolbar_INTERNAL_IPS(self): @@ -80,10 +82,10 @@ def test_show_toolbar_INTERNAL_IPS(self): request.META = {'REMOTE_ADDR': '127.0.0.1'} middleware = DebugToolbarMiddleware() - with Settings(INTERNAL_IPS=['127.0.0.1']): + with Settings(INTERNAL_IPS=['127.0.0.1'], DEBUG=True): self.assertTrue(middleware._show_toolbar(request)) - with Settings(INTERNAL_IPS=[]): + with Settings(INTERNAL_IPS=[], DEBUG=True): self.assertFalse(middleware._show_toolbar(request)) def test_request_urlconf_string(self): @@ -93,7 +95,7 @@ def test_request_urlconf_string(self): request.META = {'REMOTE_ADDR': '127.0.0.1'} middleware = DebugToolbarMiddleware() - with Settings(DEBUG=True): + with Settings(INTERNAL_IPS=['127.0.0.1'], DEBUG=True): 
middleware.process_request(request) self.assertFalse(isinstance(request.urlconf, basestring)) @@ -109,7 +111,7 @@ def test_request_urlconf_string_per_request(self): request.META = {'REMOTE_ADDR': '127.0.0.1'} middleware = DebugToolbarMiddleware() - with Settings(DEBUG=True): + with Settings(INTERNAL_IPS=['127.0.0.1'], DEBUG=True): middleware.process_request(request) request.urlconf = 'debug_toolbar.urls' middleware.process_request(request) @@ -127,7 +129,7 @@ def test_request_urlconf_module(self): request.META = {'REMOTE_ADDR': '127.0.0.1'} middleware = DebugToolbarMiddleware() - with Settings(DEBUG=True): + with Settings(INTERNAL_IPS=['127.0.0.1'], DEBUG=True): middleware.process_request(request) self.assertFalse(isinstance(request.urlconf, basestring)) From 6e3d9da54cffb41461c6778c55b0461410a9655c Mon Sep 17 00:00:00 2001 From: Rob Hudson Date: Tue, 9 Aug 2011 10:26:42 -0700 Subject: [PATCH 0010/1758] Updated sqlparse to v0.1.3 --- debug_toolbar/utils/sqlparse/__init__.py | 6 +- .../utils/sqlparse/engine/__init__.py | 20 +- debug_toolbar/utils/sqlparse/engine/filter.py | 15 +- .../utils/sqlparse/engine/grouping.py | 170 ++- debug_toolbar/utils/sqlparse/filters.py | 174 ++- debug_toolbar/utils/sqlparse/formatter.py | 6 +- debug_toolbar/utils/sqlparse/keywords.py | 1141 ++++++++--------- debug_toolbar/utils/sqlparse/lexer.py | 106 +- debug_toolbar/utils/sqlparse/sql.py | 136 +- debug_toolbar/utils/sqlparse/tokens.py | 84 +- 10 files changed, 960 insertions(+), 898 deletions(-) diff --git a/debug_toolbar/utils/sqlparse/__init__.py b/debug_toolbar/utils/sqlparse/__init__.py index 69873ca7f..99db30ece 100644 --- a/debug_toolbar/utils/sqlparse/__init__.py +++ b/debug_toolbar/utils/sqlparse/__init__.py @@ -6,10 +6,7 @@ """Parse SQL statements.""" -__version__ = '0.1.1' - - -import os +__version__ = '0.1.3' class SQLParseError(Exception): @@ -56,4 +53,3 @@ def split(sql): stack = engine.FilterStack() stack.split_statements = True return [unicode(stmt) for stmt in stack.run(sql)] - diff --git a/debug_toolbar/utils/sqlparse/engine/__init__.py b/debug_toolbar/utils/sqlparse/engine/__init__.py index cae079360..e838a3ede 100644 --- a/debug_toolbar/utils/sqlparse/engine/__init__.py +++ b/debug_toolbar/utils/sqlparse/engine/__init__.py @@ -5,9 +5,7 @@ """filter""" -import re - -from debug_toolbar.utils.sqlparse import lexer, SQLParseError +from debug_toolbar.utils.sqlparse import lexer from debug_toolbar.utils.sqlparse.engine import grouping from debug_toolbar.utils.sqlparse.engine.filter import StatementFilter @@ -42,8 +40,8 @@ def run(self, sql): stream = lexer.tokenize(sql) # Process token stream if self.preprocess: - for filter_ in self.preprocess: - stream = filter_.process(self, stream) + for filter_ in self.preprocess: + stream = filter_.process(self, stream) if (self.stmtprocess or self.postprocess or self.split_statements or self._grouping): @@ -51,6 +49,7 @@ def run(self, sql): stream = splitter.process(self, stream) if self._grouping: + def _group(stream): for stmt in stream: grouping.group(stmt) @@ -58,23 +57,24 @@ def _group(stream): stream = _group(stream) if self.stmtprocess: - def _run(stream): + + def _run1(stream): ret = [] for stmt in stream: for filter_ in self.stmtprocess: filter_.process(self, stmt) ret.append(stmt) return ret - stream = _run(stream) + stream = _run1(stream) if self.postprocess: - def _run(stream): + + def _run2(stream): for stmt in stream: stmt.tokens = list(self._flatten(stmt.tokens)) for filter_ in self.postprocess: stmt = filter_.process(self, stmt) yield stmt 
- stream = _run(stream) + stream = _run2(stream) return stream - diff --git a/debug_toolbar/utils/sqlparse/engine/filter.py b/debug_toolbar/utils/sqlparse/engine/filter.py index 8d1c7b2bc..c1c0d6a0c 100644 --- a/debug_toolbar/utils/sqlparse/engine/filter.py +++ b/debug_toolbar/utils/sqlparse/engine/filter.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- +from debug_toolbar.utils.sqlparse.sql import Statement, Token from debug_toolbar.utils.sqlparse import tokens as T -from debug_toolbar.utils.sqlparse.engine.grouping import Statement, Token class TokenFilter(object): @@ -21,11 +21,13 @@ def __init__(self): self._in_declare = False self._in_dbldollar = False self._is_create = False + self._begin_depth = 0 def _reset(self): self._in_declare = False self._in_dbldollar = False self._is_create = False + self._begin_depth = 0 def _change_splitlevel(self, ttype, value): # PostgreSQL @@ -41,29 +43,32 @@ def _change_splitlevel(self, ttype, value): return 0 # ANSI - if ttype is not T.Keyword: + if ttype not in T.Keyword: return 0 unified = value.upper() - if unified == 'DECLARE': + if unified == 'DECLARE' and self._is_create: self._in_declare = True return 1 if unified == 'BEGIN': - if self._in_declare: + self._begin_depth += 1 + if self._in_declare: # FIXME(andi): This makes no sense. return 0 return 0 if unified == 'END': # Should this respect a preceeding BEGIN? # In CASE ... WHEN ... END this results in a split level -1. + self._begin_depth = max(0, self._begin_depth-1) return -1 if ttype is T.Keyword.DDL and unified.startswith('CREATE'): self._is_create = True + return 0 - if unified in ('IF', 'FOR') and self._is_create: + if unified in ('IF', 'FOR') and self._is_create and self._begin_depth > 0: return 1 # Default diff --git a/debug_toolbar/utils/sqlparse/engine/grouping.py b/debug_toolbar/utils/sqlparse/engine/grouping.py index 532ccec3f..4e50c7b19 100644 --- a/debug_toolbar/utils/sqlparse/engine/grouping.py +++ b/debug_toolbar/utils/sqlparse/engine/grouping.py @@ -1,16 +1,19 @@ # -*- coding: utf-8 -*- import itertools -import re -import types +from debug_toolbar.utils.sqlparse import sql from debug_toolbar.utils.sqlparse import tokens as T -from debug_toolbar.utils.sqlparse.sql import * +try: + next +except NameError: # Python < 2.6 + next = lambda i: i.next() def _group_left_right(tlist, ttype, value, cls, check_right=lambda t: True, + check_left=lambda t: True, include_semicolon=False): [_group_left_right(sgroup, ttype, value, cls, check_right, include_semicolon) for sgroup in tlist.get_sublists() @@ -20,14 +23,20 @@ def _group_left_right(tlist, ttype, value, cls, while token: right = tlist.token_next(tlist.token_index(token)) left = tlist.token_prev(tlist.token_index(token)) - if (right is None or not check_right(right) - or left is None): - token = tlist.token_next_match(tlist.token_index(token)+1, + if right is None or not check_right(right): + token = tlist.token_next_match(tlist.token_index(token) + 1, + ttype, value) + elif left is None or not check_right(left): + token = tlist.token_next_match(tlist.token_index(token) + 1, ttype, value) else: if include_semicolon: - right = tlist.token_next_match(tlist.token_index(right), - T.Punctuation, ';') + sright = tlist.token_next_match(tlist.token_index(right), + T.Punctuation, ';') + if sright is not None: + # only overwrite "right" if a semicolon is actually + # present. 
+ right = sright tokens = tlist.tokens_between(left, right)[1:] if not isinstance(left, cls): new = cls([left]) @@ -38,9 +47,10 @@ def _group_left_right(tlist, ttype, value, cls, left.tokens.extend(tokens) for t in tokens: tlist.tokens.remove(t) - token = tlist.token_next_match(tlist.token_index(left)+1, + token = tlist.token_next_match(tlist.token_index(left) + 1, ttype, value) + def _group_matching(tlist, start_ttype, start_value, end_ttype, end_value, cls, include_semicolon=False, recurse=False): def _find_matching(i, tl, stt, sva, ett, eva): @@ -66,7 +76,7 @@ def _find_matching(i, tl, stt, sva, ett, eva): end = _find_matching(tidx, tlist, start_ttype, start_value, end_ttype, end_value) if end is None: - idx = tidx+1 + idx = tidx + 1 else: if include_semicolon: next_ = tlist.token_next(tlist.token_index(end)) @@ -75,71 +85,102 @@ def _find_matching(i, tl, stt, sva, ett, eva): group = tlist.group_tokens(cls, tlist.tokens_between(token, end)) _group_matching(group, start_ttype, start_value, end_ttype, end_value, cls, include_semicolon) - idx = tlist.token_index(group)+1 + idx = tlist.token_index(group) + 1 token = tlist.token_next_match(idx, start_ttype, start_value) + def group_if(tlist): - _group_matching(tlist, T.Keyword, 'IF', T.Keyword, 'END IF', If, True) + _group_matching(tlist, T.Keyword, 'IF', T.Keyword, 'END IF', sql.If, True) + def group_for(tlist): - _group_matching(tlist, T.Keyword, 'FOR', T.Keyword, 'END LOOP', For, True) + _group_matching(tlist, T.Keyword, 'FOR', T.Keyword, 'END LOOP', + sql.For, True) + def group_as(tlist): - _group_left_right(tlist, T.Keyword, 'AS', Identifier) + + def _right_valid(token): + # Currently limited to DML/DDL. Maybe additional more non SQL reserved + # keywords should appear here (see issue8). + return not token.ttype in (T.DML, T.DDL) + _group_left_right(tlist, T.Keyword, 'AS', sql.Identifier, + check_right=_right_valid) + def group_assignment(tlist): - _group_left_right(tlist, T.Assignment, ':=', Assignment, + _group_left_right(tlist, T.Assignment, ':=', sql.Assignment, include_semicolon=True) -def group_comparsion(tlist): - _group_left_right(tlist, T.Operator, None, Comparsion) + +def group_comparison(tlist): + + def _parts_valid(token): + return (token.ttype in (T.String.Symbol, T.Name, T.Number, + T.Number.Integer, T.Literal, + T.Literal.Number.Integer) + or isinstance(token, (sql.Identifier,))) + _group_left_right(tlist, T.Operator.Comparison, None, sql.Comparison, + check_left=_parts_valid, check_right=_parts_valid) def group_case(tlist): - _group_matching(tlist, T.Keyword, 'CASE', T.Keyword, 'END', Case, + _group_matching(tlist, T.Keyword, 'CASE', T.Keyword, 'END', sql.Case, include_semicolon=True, recurse=True) def group_identifier(tlist): def _consume_cycle(tl, i): - x = itertools.cycle((lambda y: y.match(T.Punctuation, '.'), - lambda y: y.ttype in (T.String.Symbol, - T.Name, - T.Wildcard))) + x = itertools.cycle(( + lambda y: (y.match(T.Punctuation, '.') + or y.ttype is T.Operator), + lambda y: (y.ttype in (T.String.Symbol, + T.Name, + T.Wildcard, + T.Literal.Number.Integer)))) for t in tl.tokens[i:]: - if x.next()(t): + if next(x)(t): yield t else: raise StopIteration # bottom up approach: group subgroups first [group_identifier(sgroup) for sgroup in tlist.get_sublists() - if not isinstance(sgroup, Identifier)] + if not isinstance(sgroup, sql.Identifier)] # real processing idx = 0 - token = tlist.token_next_by_type(idx, (T.String.Symbol, T.Name)) + token = tlist.token_next_by_instance(idx, sql.Function) + if token is None: + token 
= tlist.token_next_by_type(idx, (T.String.Symbol, T.Name)) while token: - identifier_tokens = [token]+list( + identifier_tokens = [token] + list( _consume_cycle(tlist, - tlist.token_index(token)+1)) - group = tlist.group_tokens(Identifier, identifier_tokens) - idx = tlist.token_index(group)+1 - token = tlist.token_next_by_type(idx, (T.String.Symbol, T.Name)) + tlist.token_index(token) + 1)) + if not (len(identifier_tokens) == 1 + and isinstance(identifier_tokens[0], sql.Function)): + group = tlist.group_tokens(sql.Identifier, identifier_tokens) + idx = tlist.token_index(group) + 1 + else: + idx += 1 + token = tlist.token_next_by_instance(idx, sql.Function) + if token is None: + token = tlist.token_next_by_type(idx, (T.String.Symbol, T.Name)) def group_identifier_list(tlist): [group_identifier_list(sgroup) for sgroup in tlist.get_sublists() - if not isinstance(sgroup, (Identifier, IdentifierList))] + if not isinstance(sgroup, sql.IdentifierList)] idx = 0 # Allowed list items - fend1_funcs = [lambda t: isinstance(t, Identifier), + fend1_funcs = [lambda t: isinstance(t, (sql.Identifier, sql.Function)), lambda t: t.is_whitespace(), + lambda t: t.ttype == T.Name, lambda t: t.ttype == T.Wildcard, lambda t: t.match(T.Keyword, 'null'), lambda t: t.ttype == T.Number.Integer, lambda t: t.ttype == T.String.Single, - lambda t: isinstance(t, Comparsion), + lambda t: isinstance(t, sql.Comparison), ] tcomma = tlist.token_next_match(idx, T.Punctuation, ',') start = None @@ -156,7 +197,7 @@ def group_identifier_list(tlist): if not bpassed or not apassed: # Something's wrong here, skip ahead to next "," start = None - tcomma = tlist.token_next_match(tlist.token_index(tcomma)+1, + tcomma = tlist.token_next_match(tlist.token_index(tcomma) + 1, T.Punctuation, ',') else: if start is None: @@ -165,25 +206,27 @@ def group_identifier_list(tlist): if next_ is None or not next_.match(T.Punctuation, ','): # Reached the end of the list tokens = tlist.tokens_between(start, after) - group = tlist.group_tokens(IdentifierList, tokens) + group = tlist.group_tokens(sql.IdentifierList, tokens) start = None - tcomma = tlist.token_next_match(tlist.token_index(group)+1, + tcomma = tlist.token_next_match(tlist.token_index(group) + 1, T.Punctuation, ',') else: tcomma = next_ def group_parenthesis(tlist): - _group_matching(tlist, T.Punctuation, '(', T.Punctuation, ')', Parenthesis) + _group_matching(tlist, T.Punctuation, '(', T.Punctuation, ')', + sql.Parenthesis) + def group_comments(tlist): [group_comments(sgroup) for sgroup in tlist.get_sublists() - if not isinstance(sgroup, Comment)] + if not isinstance(sgroup, sql.Comment)] idx = 0 token = tlist.token_next_by_type(idx, T.Comment) while token: tidx = tlist.token_index(token) - end = tlist.token_not_matching(tidx+1, + end = tlist.token_not_matching(tidx + 1, [lambda t: t.ttype in T.Comment, lambda t: t.is_whitespace()]) if end is None: @@ -192,49 +235,70 @@ def group_comments(tlist): eidx = tlist.token_index(end) grp_tokens = tlist.tokens_between(token, tlist.token_prev(eidx, False)) - group = tlist.group_tokens(Comment, grp_tokens) + group = tlist.group_tokens(sql.Comment, grp_tokens) idx = tlist.token_index(group) token = tlist.token_next_by_type(idx, T.Comment) + def group_where(tlist): [group_where(sgroup) for sgroup in tlist.get_sublists() - if not isinstance(sgroup, Where)] + if not isinstance(sgroup, sql.Where)] idx = 0 token = tlist.token_next_match(idx, T.Keyword, 'WHERE') stopwords = ('ORDER', 'GROUP', 'LIMIT', 'UNION') while token: tidx = tlist.token_index(token) - 
end = tlist.token_next_match(tidx+1, T.Keyword, stopwords) + end = tlist.token_next_match(tidx + 1, T.Keyword, stopwords) if end is None: - end = tlist.tokens[-1] + end = tlist._groupable_tokens[-1] else: - end = tlist.tokens[tlist.token_index(end)-1] - group = tlist.group_tokens(Where, tlist.tokens_between(token, end)) + end = tlist.tokens[tlist.token_index(end) - 1] + group = tlist.group_tokens(sql.Where, + tlist.tokens_between(token, end), + ignore_ws=True) idx = tlist.token_index(group) token = tlist.token_next_match(idx, T.Keyword, 'WHERE') + def group_aliased(tlist): [group_aliased(sgroup) for sgroup in tlist.get_sublists() - if not isinstance(sgroup, Identifier)] + if not isinstance(sgroup, (sql.Identifier, sql.Function))] idx = 0 - token = tlist.token_next_by_instance(idx, Identifier) + token = tlist.token_next_by_instance(idx, (sql.Identifier, sql.Function)) while token: next_ = tlist.token_next(tlist.token_index(token)) - if next_ is not None and isinstance(next_, Identifier): + if next_ is not None and isinstance(next_, (sql.Identifier, sql.Function)): grp = tlist.tokens_between(token, next_)[1:] token.tokens.extend(grp) for t in grp: tlist.tokens.remove(t) - idx = tlist.token_index(token)+1 - token = tlist.token_next_by_instance(idx, Identifier) + idx = tlist.token_index(token) + 1 + token = tlist.token_next_by_instance(idx, (sql.Identifier, sql.Function)) def group_typecasts(tlist): - _group_left_right(tlist, T.Punctuation, '::', Identifier) + _group_left_right(tlist, T.Punctuation, '::', sql.Identifier) + + +def group_functions(tlist): + [group_functions(sgroup) for sgroup in tlist.get_sublists() + if not isinstance(sgroup, sql.Function)] + idx = 0 + token = tlist.token_next_by_type(idx, T.Name) + while token: + next_ = tlist.token_next(token) + if not isinstance(next_, sql.Parenthesis): + idx = tlist.token_index(token) + 1 + else: + func = tlist.group_tokens(sql.Function, + tlist.tokens_between(token, next_)) + idx = tlist.token_index(func) + 1 + token = tlist.token_next_by_type(idx, T.Name) def group(tlist): for func in [group_parenthesis, + group_functions, group_comments, group_where, group_case, @@ -243,8 +307,8 @@ def group(tlist): group_as, group_aliased, group_assignment, - group_comparsion, + group_comparison, group_identifier_list, group_if, - group_for,]: + group_for]: func(tlist) diff --git a/debug_toolbar/utils/sqlparse/filters.py b/debug_toolbar/utils/sqlparse/filters.py index 3c9279134..2d247e7b7 100644 --- a/debug_toolbar/utils/sqlparse/filters.py +++ b/debug_toolbar/utils/sqlparse/filters.py @@ -2,7 +2,6 @@ import re -from debug_toolbar.utils.sqlparse.engine import grouping from debug_toolbar.utils.sqlparse import tokens as T from debug_toolbar.utils.sqlparse import sql @@ -19,34 +18,6 @@ def process(self, stack, stream): raise NotImplementedError -# FIXME: Should be removed -def rstrip(stream): - buff = [] - for token in stream: - if token.is_whitespace() and '\n' in token.value: - # assuming there's only one \n in value - before, rest = token.value.split('\n', 1) - token.value = '\n%s' % rest - buff = [] - yield token - elif token.is_whitespace(): - buff.append(token) - elif token.is_group(): - token.tokens = list(rstrip(token.tokens)) - # process group and look if it starts with a nl - if token.tokens and token.tokens[0].is_whitespace(): - before, rest = token.tokens[0].value.split('\n', 1) - token.tokens[0].value = '\n%s' % rest - buff = [] - while buff: - yield buff.pop(0) - yield token - else: - while buff: - yield buff.pop(0) - yield token - - # 
-------------------------- # token process @@ -74,17 +45,28 @@ class KeywordCaseFilter(_CaseFilter): class IdentifierCaseFilter(_CaseFilter): ttype = (T.Name, T.String.Symbol) + def process(self, stack, stream): + for ttype, value in stream: + if ttype in self.ttype and not value.strip()[0] == '"': + value = self.convert(value) + yield ttype, value + # ---------------------- # statement process class StripCommentsFilter(Filter): + def _get_next_comment(self, tlist): + # TODO(andi) Comment types should be unified, see related issue38 + token = tlist.token_next_by_instance(0, sql.Comment) + if token is None: + token = tlist.token_next_by_type(0, T.Comment) + return token + def _process(self, tlist): - idx = 0 - clss = set([x.__class__ for x in tlist.tokens]) - while grouping.Comment in clss: - token = tlist.token_next_by_instance(0, grouping.Comment) + token = self._get_next_comment(tlist) + while token: tidx = tlist.token_index(token) prev = tlist.token_prev(tidx, False) next_ = tlist.token_next(tidx, False) @@ -94,10 +76,10 @@ def _process(self, tlist): and not prev.is_whitespace() and not next_.is_whitespace() and not (prev.match(T.Punctuation, '(') or next_.match(T.Punctuation, ')'))): - tlist.tokens[tidx] = grouping.Token(T.Whitespace, ' ') + tlist.tokens[tidx] = sql.Token(T.Whitespace, ' ') else: tlist.tokens.pop(tidx) - clss = set([x.__class__ for x in tlist.tokens]) + token = self._get_next_comment(tlist) def process(self, stack, stmt): [self.process(stack, sgroup) for sgroup in stmt.get_sublists()] @@ -149,24 +131,32 @@ def __init__(self, width=2, char=' ', line_width=None): def _get_offset(self, token): all_ = list(self._curr_stmt.flatten()) idx = all_.index(token) - raw = ''.join(unicode(x) for x in all_[:idx+1]) + raw = ''.join(unicode(x) for x in all_[:idx + 1]) line = raw.splitlines()[-1] # Now take current offset into account and return relative offset. 
- full_offset = len(line)-(len(self.char*(self.width*self.indent))) + full_offset = len(line) - len(self.char * (self.width * self.indent)) return full_offset - self.offset def nl(self): # TODO: newline character should be configurable - ws = '\n'+(self.char*((self.indent*self.width)+self.offset)) - return grouping.Token(T.Whitespace, ws) + ws = '\n' + (self.char * ((self.indent * self.width) + self.offset)) + return sql.Token(T.Whitespace, ws) def _split_kwds(self, tlist): split_words = ('FROM', 'JOIN$', 'AND', 'OR', 'GROUP', 'ORDER', 'UNION', 'VALUES', - 'SET') - idx = 0 - token = tlist.token_next_match(idx, T.Keyword, split_words, + 'SET', 'BETWEEN') + def _next_token(i): + t = tlist.token_next_match(i, T.Keyword, split_words, regex=True) + if t and t.value.upper() == 'BETWEEN': + t = _next_token(tlist.token_index(t)+1) + if t and t.value.upper() == 'AND': + t = _next_token(tlist.token_index(t)+1) + return t + + idx = 0 + token = _next_token(idx) while token: prev = tlist.token_prev(tlist.token_index(token), False) offset = 1 @@ -181,8 +171,7 @@ def _split_kwds(self, tlist): else: nl = self.nl() tlist.insert_before(token, nl) - token = tlist.token_next_match(tlist.token_index(nl)+offset, - T.Keyword, split_words, regex=True) + token = _next_token(tlist.token_index(nl) + offset) def _split_statements(self, tlist): idx = 0 @@ -195,7 +184,7 @@ def _split_statements(self, tlist): if prev: nl = self.nl() tlist.insert_before(token, nl) - token = tlist.token_next_by_type(tlist.token_index(token)+1, + token = tlist.token_next_by_type(tlist.token_index(token) + 1, (T.Keyword.DDL, T.Keyword.DML)) def _process(self, tlist): @@ -227,9 +216,9 @@ def _process_parenthesis(self, tlist): def _process_identifierlist(self, tlist): identifiers = tlist.get_identifiers() - if len(identifiers) > 1: + if len(identifiers) > 1 and not tlist.within(sql.Function): first = list(identifiers[0].flatten())[0] - num_offset = self._get_offset(first)-len(first.value) + num_offset = self._get_offset(first) - len(first.value) self.offset += num_offset for token in identifiers[1:]: tlist.insert_before(token, self.nl()) @@ -237,16 +226,16 @@ def _process_identifierlist(self, tlist): self._process_default(tlist) def _process_case(self, tlist): - cases = tlist.get_cases() is_first = True num_offset = None case = tlist.tokens[0] - outer_offset = self._get_offset(case)-len(case.value) + outer_offset = self._get_offset(case) - len(case.value) self.offset += outer_offset for cond, value in tlist.get_cases(): if is_first: + tcond = list(cond[0].flatten())[0] is_first = False - num_offset = self._get_offset(cond[0])-len(cond[0].value) + num_offset = self._get_offset(tcond) - len(tcond.value) self.offset += num_offset continue if cond is None: @@ -273,17 +262,17 @@ def _process_default(self, tlist, stmts=True, kwds=True): [self._process(sgroup) for sgroup in tlist.get_sublists()] def process(self, stack, stmt): - if isinstance(stmt, grouping.Statement): + if isinstance(stmt, sql.Statement): self._curr_stmt = stmt self._process(stmt) - if isinstance(stmt, grouping.Statement): + if isinstance(stmt, sql.Statement): if self._last_stmt is not None: if self._last_stmt.to_unicode().endswith('\n'): nl = '\n' else: nl = '\n\n' stmt.tokens.insert(0, - grouping.Token(T.Whitespace, nl)) + sql.Token(T.Whitespace, nl)) if self._last_stmt != stmt: self._last_stmt = stmt @@ -292,7 +281,7 @@ def process(self, stack, stmt): class RightMarginFilter(Filter): keep_together = ( -# grouping.TypeCast, grouping.Identifier, grouping.Alias, +# sql.TypeCast, 
sql.Identifier, sql.Alias, ) def __init__(self, width=79): @@ -317,7 +306,7 @@ def _process(self, stack, group, stream): indent = match.group() else: indent = '' - yield grouping.Token(T.Whitespace, '\n%s' % indent) + yield sql.Token(T.Whitespace, '\n%s' % indent) self.line = indent self.line += val yield token @@ -349,14 +338,14 @@ def __init__(self, varname='sql'): def _process(self, stream, varname, count, has_nl): if count > 1: - yield grouping.Token(T.Whitespace, '\n') - yield grouping.Token(T.Name, varname) - yield grouping.Token(T.Whitespace, ' ') - yield grouping.Token(T.Operator, '=') - yield grouping.Token(T.Whitespace, ' ') + yield sql.Token(T.Whitespace, '\n') + yield sql.Token(T.Name, varname) + yield sql.Token(T.Whitespace, ' ') + yield sql.Token(T.Operator, '=') + yield sql.Token(T.Whitespace, ' ') if has_nl: - yield grouping.Token(T.Operator, '(') - yield grouping.Token(T.Text, "'") + yield sql.Token(T.Operator, '(') + yield sql.Token(T.Text, "'") cnt = 0 for token in stream: cnt += 1 @@ -364,20 +353,20 @@ def _process(self, stream, varname, count, has_nl): if cnt == 1: continue after_lb = token.value.split('\n', 1)[1] - yield grouping.Token(T.Text, " '") - yield grouping.Token(T.Whitespace, '\n') - for i in range(len(varname)+4): - yield grouping.Token(T.Whitespace, ' ') - yield grouping.Token(T.Text, "'") + yield sql.Token(T.Text, " '") + yield sql.Token(T.Whitespace, '\n') + for i in range(len(varname) + 4): + yield sql.Token(T.Whitespace, ' ') + yield sql.Token(T.Text, "'") if after_lb: # it's the indendation - yield grouping.Token(T.Whitespace, after_lb) + yield sql.Token(T.Whitespace, after_lb) continue elif token.value and "'" in token.value: token.value = token.value.replace("'", "\\'") - yield grouping.Token(T.Text, token.value or '') - yield grouping.Token(T.Text, "'") + yield sql.Token(T.Text, token.value or '') + yield sql.Token(T.Text, "'") if has_nl: - yield grouping.Token(T.Operator, ')') + yield sql.Token(T.Operator, ')') def process(self, stack, stmt): self.cnt += 1 @@ -398,36 +387,32 @@ def __init__(self, varname='sql'): def _process(self, stream, varname): if self.count > 1: - yield grouping.Token(T.Whitespace, '\n') - yield grouping.Token(T.Name, varname) - yield grouping.Token(T.Whitespace, ' ') - yield grouping.Token(T.Operator, '=') - yield grouping.Token(T.Whitespace, ' ') - yield grouping.Token(T.Text, '"') - cnt = 0 + yield sql.Token(T.Whitespace, '\n') + yield sql.Token(T.Name, varname) + yield sql.Token(T.Whitespace, ' ') + yield sql.Token(T.Operator, '=') + yield sql.Token(T.Whitespace, ' ') + yield sql.Token(T.Text, '"') for token in stream: if token.is_whitespace() and '\n' in token.value: -# cnt += 1 -# if cnt == 1: -# continue after_lb = token.value.split('\n', 1)[1] - yield grouping.Token(T.Text, ' "') - yield grouping.Token(T.Operator, ';') - yield grouping.Token(T.Whitespace, '\n') - yield grouping.Token(T.Name, varname) - yield grouping.Token(T.Whitespace, ' ') - yield grouping.Token(T.Punctuation, '.') - yield grouping.Token(T.Operator, '=') - yield grouping.Token(T.Whitespace, ' ') - yield grouping.Token(T.Text, '"') + yield sql.Token(T.Text, ' "') + yield sql.Token(T.Operator, ';') + yield sql.Token(T.Whitespace, '\n') + yield sql.Token(T.Name, varname) + yield sql.Token(T.Whitespace, ' ') + yield sql.Token(T.Punctuation, '.') + yield sql.Token(T.Operator, '=') + yield sql.Token(T.Whitespace, ' ') + yield sql.Token(T.Text, '"') if after_lb: - yield grouping.Token(T.Text, after_lb) + yield sql.Token(T.Text, after_lb) continue elif '"' 
in token.value: token.value = token.value.replace('"', '\\"') - yield grouping.Token(T.Text, token.value) - yield grouping.Token(T.Text, '"') - yield grouping.Token(T.Punctuation, ';') + yield sql.Token(T.Text, token.value) + yield sql.Token(T.Text, '"') + yield sql.Token(T.Punctuation, ';') def process(self, stack, stmt): self.count += 1 @@ -437,4 +422,3 @@ def process(self, stack, stmt): varname = self.varname stmt.tokens = tuple(self._process(stmt.tokens, varname)) return stmt - diff --git a/debug_toolbar/utils/sqlparse/formatter.py b/debug_toolbar/utils/sqlparse/formatter.py index 34e9fe0fd..3acece971 100644 --- a/debug_toolbar/utils/sqlparse/formatter.py +++ b/debug_toolbar/utils/sqlparse/formatter.py @@ -76,11 +76,11 @@ def build_filter_stack(stack, options): options: Dictionary with options validated by validate_options. """ # Token filter - if 'keyword_case' in options: + if options.get('keyword_case', None): stack.preprocess.append( filters.KeywordCaseFilter(options['keyword_case'])) - if 'identifier_case' in options: + if options.get('identifier_case', None): stack.preprocess.append( filters.IdentifierCaseFilter(options['identifier_case'])) @@ -118,5 +118,3 @@ def build_filter_stack(stack, options): stack.postprocess.append(fltr) return stack - - diff --git a/debug_toolbar/utils/sqlparse/keywords.py b/debug_toolbar/utils/sqlparse/keywords.py index cada1399d..4782cfe4f 100644 --- a/debug_toolbar/utils/sqlparse/keywords.py +++ b/debug_toolbar/utils/sqlparse/keywords.py @@ -1,590 +1,565 @@ -from debug_toolbar.utils.sqlparse.tokens import * +from debug_toolbar.utils.sqlparse import tokens KEYWORDS = { - 'ABORT': Keyword, - 'ABS': Keyword, - 'ABSOLUTE': Keyword, - 'ACCESS': Keyword, - 'ADA': Keyword, - 'ADD': Keyword, - 'ADMIN': Keyword, - 'AFTER': Keyword, - 'AGGREGATE': Keyword, - 'ALIAS': Keyword, - 'ALL': Keyword, - 'ALLOCATE': Keyword, - 'ANALYSE': Keyword, - 'ANALYZE': Keyword, - 'AND': Keyword, - 'ANY': Keyword, - 'ARE': Keyword, - 'AS': Keyword, - 'ASC': Keyword, - 'ASENSITIVE': Keyword, - 'ASSERTION': Keyword, - 'ASSIGNMENT': Keyword, - 'ASYMMETRIC': Keyword, - 'AT': Keyword, - 'ATOMIC': Keyword, - 'AUTHORIZATION': Keyword, - 'AVG': Keyword, - - 'BACKWARD': Keyword, - 'BEFORE': Keyword, - 'BEGIN': Keyword, - 'BETWEEN': Keyword, - 'BITVAR': Keyword, - 'BIT_LENGTH': Keyword, - 'BOTH': Keyword, - 'BREADTH': Keyword, - 'BY': Keyword, - -# 'C': Keyword, # most likely this is an alias - 'CACHE': Keyword, - 'CALL': Keyword, - 'CALLED': Keyword, - 'CARDINALITY': Keyword, - 'CASCADE': Keyword, - 'CASCADED': Keyword, - 'CASE': Keyword, - 'CAST': Keyword, - 'CATALOG': Keyword, - 'CATALOG_NAME': Keyword, - 'CHAIN': Keyword, - 'CHARACTERISTICS': Keyword, - 'CHARACTER_LENGTH': Keyword, - 'CHARACTER_SET_CATALOG': Keyword, - 'CHARACTER_SET_NAME': Keyword, - 'CHARACTER_SET_SCHEMA': Keyword, - 'CHAR_LENGTH': Keyword, - 'CHECK': Keyword, - 'CHECKED': Keyword, - 'CHECKPOINT': Keyword, - 'CLASS': Keyword, - 'CLASS_ORIGIN': Keyword, - 'CLOB': Keyword, - 'CLOSE': Keyword, - 'CLUSTER': Keyword, - 'COALSECE': Keyword, - 'COBOL': Keyword, - 'COLLATE': Keyword, - 'COLLATION': Keyword, - 'COLLATION_CATALOG': Keyword, - 'COLLATION_NAME': Keyword, - 'COLLATION_SCHEMA': Keyword, - 'COLUMN': Keyword, - 'COLUMN_NAME': Keyword, - 'COMMAND_FUNCTION': Keyword, - 'COMMAND_FUNCTION_CODE': Keyword, - 'COMMENT': Keyword, - 'COMMIT': Keyword, - 'COMMITTED': Keyword, - 'COMPLETION': Keyword, - 'CONDITION_NUMBER': Keyword, - 'CONNECT': Keyword, - 'CONNECTION': Keyword, - 'CONNECTION_NAME': Keyword, - 'CONSTRAINT': 
Keyword, - 'CONSTRAINTS': Keyword, - 'CONSTRAINT_CATALOG': Keyword, - 'CONSTRAINT_NAME': Keyword, - 'CONSTRAINT_SCHEMA': Keyword, - 'CONSTRUCTOR': Keyword, - 'CONTAINS': Keyword, - 'CONTINUE': Keyword, - 'CONVERSION': Keyword, - 'CONVERT': Keyword, - 'COPY': Keyword, - 'CORRESPONTING': Keyword, - 'COUNT': Keyword, - 'CREATEDB': Keyword, - 'CREATEUSER': Keyword, - 'CROSS': Keyword, - 'CUBE': Keyword, - 'CURRENT': Keyword, - 'CURRENT_DATE': Keyword, - 'CURRENT_PATH': Keyword, - 'CURRENT_ROLE': Keyword, - 'CURRENT_TIME': Keyword, - 'CURRENT_TIMESTAMP': Keyword, - 'CURRENT_USER': Keyword, - 'CURSOR': Keyword, - 'CURSOR_NAME': Keyword, - 'CYCLE': Keyword, - - 'DATA': Keyword, - 'DATABASE': Keyword, - 'DATETIME_INTERVAL_CODE': Keyword, - 'DATETIME_INTERVAL_PRECISION': Keyword, - 'DAY': Keyword, - 'DEALLOCATE': Keyword, - 'DECLARE': Keyword, - 'DEFAULT': Keyword, - 'DEFAULTS': Keyword, - 'DEFERRABLE': Keyword, - 'DEFERRED': Keyword, - 'DEFINED': Keyword, - 'DEFINER': Keyword, - 'DELIMITER': Keyword, - 'DELIMITERS': Keyword, - 'DEREF': Keyword, - 'DESC': Keyword, - 'DESCRIBE': Keyword, - 'DESCRIPTOR': Keyword, - 'DESTROY': Keyword, - 'DESTRUCTOR': Keyword, - 'DETERMINISTIC': Keyword, - 'DIAGNOSTICS': Keyword, - 'DICTIONARY': Keyword, - 'DISCONNECT': Keyword, - 'DISPATCH': Keyword, - 'DISTINCT': Keyword, - 'DO': Keyword, - 'DOMAIN': Keyword, - 'DYNAMIC': Keyword, - 'DYNAMIC_FUNCTION': Keyword, - 'DYNAMIC_FUNCTION_CODE': Keyword, - - 'EACH': Keyword, - 'ELSE': Keyword, - 'ENCODING': Keyword, - 'ENCRYPTED': Keyword, - 'END': Keyword, - 'END-EXEC': Keyword, - 'EQUALS': Keyword, - 'ESCAPE': Keyword, - 'EVERY': Keyword, - 'EXCEPT': Keyword, - 'ESCEPTION': Keyword, - 'EXCLUDING': Keyword, - 'EXCLUSIVE': Keyword, - 'EXEC': Keyword, - 'EXECUTE': Keyword, - 'EXISTING': Keyword, - 'EXISTS': Keyword, - 'EXTERNAL': Keyword, - 'EXTRACT': Keyword, - - 'FALSE': Keyword, - 'FETCH': Keyword, - 'FINAL': Keyword, - 'FIRST': Keyword, - 'FOR': Keyword, - 'FORCE': Keyword, - 'FOREIGN': Keyword, - 'FORTRAN': Keyword, - 'FORWARD': Keyword, - 'FOUND': Keyword, - 'FREE': Keyword, - 'FREEZE': Keyword, - 'FROM': Keyword, - 'FULL': Keyword, - 'FUNCTION': Keyword, - - 'G': Keyword, - 'GENERAL': Keyword, - 'GENERATED': Keyword, - 'GET': Keyword, - 'GLOBAL': Keyword, - 'GO': Keyword, - 'GOTO': Keyword, - 'GRANT': Keyword, - 'GRANTED': Keyword, - 'GROUP': Keyword, - 'GROUPING': Keyword, - - 'HANDLER': Keyword, - 'HAVING': Keyword, - 'HIERARCHY': Keyword, - 'HOLD': Keyword, - 'HOST': Keyword, - - 'IDENTITY': Keyword, - 'IF': Keyword, - 'IGNORE': Keyword, - 'ILIKE': Keyword, - 'IMMEDIATE': Keyword, - 'IMMUTABLE': Keyword, - - 'IMPLEMENTATION': Keyword, - 'IMPLICIT': Keyword, - 'IN': Keyword, - 'INCLUDING': Keyword, - 'INCREMENT': Keyword, - 'INDEX': Keyword, - - 'INDITCATOR': Keyword, - 'INFIX': Keyword, - 'INHERITS': Keyword, - 'INITIALIZE': Keyword, - 'INITIALLY': Keyword, - 'INNER': Keyword, - 'INOUT': Keyword, - 'INPUT': Keyword, - 'INSENSITIVE': Keyword, - 'INSTANTIABLE': Keyword, - 'INSTEAD': Keyword, - 'INTERSECT': Keyword, - 'INTO': Keyword, - 'INVOKER': Keyword, - 'IS': Keyword, - 'ISNULL': Keyword, - 'ISOLATION': Keyword, - 'ITERATE': Keyword, - - 'JOIN': Keyword, - - 'K': Keyword, - 'KEY': Keyword, - 'KEY_MEMBER': Keyword, - 'KEY_TYPE': Keyword, - - 'LANCOMPILER': Keyword, - 'LANGUAGE': Keyword, - 'LARGE': Keyword, - 'LAST': Keyword, - 'LATERAL': Keyword, - 'LEADING': Keyword, - 'LEFT': Keyword, - 'LENGTH': Keyword, - 'LESS': Keyword, - 'LEVEL': Keyword, - 'LIKE': Keyword, - 'LIMIT': Keyword, - 'LISTEN': Keyword, - 
'LOAD': Keyword, - 'LOCAL': Keyword, - 'LOCALTIME': Keyword, - 'LOCALTIMESTAMP': Keyword, - 'LOCATION': Keyword, - 'LOCATOR': Keyword, - 'LOCK': Keyword, - 'LOWER': Keyword, - - 'M': Keyword, - 'MAP': Keyword, - 'MATCH': Keyword, - 'MAX': Keyword, - 'MAXVALUE': Keyword, - 'MESSAGE_LENGTH': Keyword, - 'MESSAGE_OCTET_LENGTH': Keyword, - 'MESSAGE_TEXT': Keyword, - 'METHOD': Keyword, - 'MIN': Keyword, - 'MINUTE': Keyword, - 'MINVALUE': Keyword, - 'MOD': Keyword, - 'MODE': Keyword, - 'MODIFIES': Keyword, - 'MODIFY': Keyword, - 'MONTH': Keyword, - 'MORE': Keyword, - 'MOVE': Keyword, - 'MUMPS': Keyword, - - 'NAMES': Keyword, - 'NATIONAL': Keyword, - 'NATURAL': Keyword, - 'NCHAR': Keyword, - 'NCLOB': Keyword, - 'NEW': Keyword, - 'NEXT': Keyword, - 'NO': Keyword, - 'NOCREATEDB': Keyword, - 'NOCREATEUSER': Keyword, - 'NONE': Keyword, - 'NOT': Keyword, - 'NOTHING': Keyword, - 'NOTIFY': Keyword, - 'NOTNULL': Keyword, - 'NULL': Keyword, - 'NULLABLE': Keyword, - 'NULLIF': Keyword, - - 'OBJECT': Keyword, - 'OCTET_LENGTH': Keyword, - 'OF': Keyword, - 'OFF': Keyword, - 'OFFSET': Keyword, - 'OIDS': Keyword, - 'OLD': Keyword, - 'ON': Keyword, - 'ONLY': Keyword, - 'OPEN': Keyword, - 'OPERATION': Keyword, - 'OPERATOR': Keyword, - 'OPTION': Keyword, - 'OPTIONS': Keyword, - 'OR': Keyword, - 'ORDER': Keyword, - 'ORDINALITY': Keyword, - 'OUT': Keyword, - 'OUTER': Keyword, - 'OUTPUT': Keyword, - 'OVERLAPS': Keyword, - 'OVERLAY': Keyword, - 'OVERRIDING': Keyword, - 'OWNER': Keyword, - - 'PAD': Keyword, - 'PARAMETER': Keyword, - 'PARAMETERS': Keyword, - 'PARAMETER_MODE': Keyword, - 'PARAMATER_NAME': Keyword, - 'PARAMATER_ORDINAL_POSITION': Keyword, - 'PARAMETER_SPECIFIC_CATALOG': Keyword, - 'PARAMETER_SPECIFIC_NAME': Keyword, - 'PARAMATER_SPECIFIC_SCHEMA': Keyword, - 'PARTIAL': Keyword, - 'PASCAL': Keyword, - 'PENDANT': Keyword, - 'PLACING': Keyword, - 'PLI': Keyword, - 'POSITION': Keyword, - 'POSTFIX': Keyword, - 'PRECISION': Keyword, - 'PREFIX': Keyword, - 'PREORDER': Keyword, - 'PREPARE': Keyword, - 'PRESERVE': Keyword, - 'PRIMARY': Keyword, - 'PRIOR': Keyword, - 'PRIVILEGES': Keyword, - 'PROCEDURAL': Keyword, - 'PROCEDURE': Keyword, - 'PUBLIC': Keyword, - - 'RAISE': Keyword, - 'READ': Keyword, - 'READS': Keyword, - 'RECHECK': Keyword, - 'RECURSIVE': Keyword, - 'REF': Keyword, - 'REFERENCES': Keyword, - 'REFERENCING': Keyword, - 'REINDEX': Keyword, - 'RELATIVE': Keyword, - 'RENAME': Keyword, - 'REPEATABLE': Keyword, - 'REPLACE': Keyword, - 'RESET': Keyword, - 'RESTART': Keyword, - 'RESTRICT': Keyword, - 'RESULT': Keyword, - 'RETURN': Keyword, - 'RETURNED_LENGTH': Keyword, - 'RETURNED_OCTET_LENGTH': Keyword, - 'RETURNED_SQLSTATE': Keyword, - 'RETURNS': Keyword, - 'REVOKE': Keyword, - 'RIGHT': Keyword, - 'ROLE': Keyword, - 'ROLLBACK': Keyword, - 'ROLLUP': Keyword, - 'ROUTINE': Keyword, - 'ROUTINE_CATALOG': Keyword, - 'ROUTINE_NAME': Keyword, - 'ROUTINE_SCHEMA': Keyword, - 'ROW': Keyword, - 'ROWS': Keyword, - 'ROW_COUNT': Keyword, - 'RULE': Keyword, - - 'SAVE_POINT': Keyword, - 'SCALE': Keyword, - 'SCHEMA': Keyword, - 'SCHEMA_NAME': Keyword, - 'SCOPE': Keyword, - 'SCROLL': Keyword, - 'SEARCH': Keyword, - 'SECOND': Keyword, - 'SECURITY': Keyword, - 'SELF': Keyword, - 'SENSITIVE': Keyword, - 'SERIALIZABLE': Keyword, - 'SERVER_NAME': Keyword, - 'SESSION': Keyword, - 'SESSION_USER': Keyword, - 'SETOF': Keyword, - 'SETS': Keyword, - 'SHARE': Keyword, - 'SHOW': Keyword, - 'SIMILAR': Keyword, - 'SIMPLE': Keyword, - 'SIZE': Keyword, - 'SOME': Keyword, - 'SOURCE': Keyword, - 'SPACE': Keyword, - 'SPECIFIC': Keyword, - 
'SPECIFICTYPE': Keyword, - 'SPECIFIC_NAME': Keyword, - 'SQL': Keyword, - 'SQLCODE': Keyword, - 'SQLERROR': Keyword, - 'SQLEXCEPTION': Keyword, - 'SQLSTATE': Keyword, - 'SQLWARNINIG': Keyword, - 'STABLE': Keyword, - 'START': Keyword, - 'STATE': Keyword, - 'STATEMENT': Keyword, - 'STATIC': Keyword, - 'STATISTICS': Keyword, - 'STDIN': Keyword, - 'STDOUT': Keyword, - 'STORAGE': Keyword, - 'STRICT': Keyword, - 'STRUCTURE': Keyword, - 'STYPE': Keyword, - 'SUBCLASS_ORIGIN': Keyword, - 'SUBLIST': Keyword, - 'SUBSTRING': Keyword, - 'SUM': Keyword, - 'SYMMETRIC': Keyword, - 'SYSID': Keyword, - 'SYSTEM': Keyword, - 'SYSTEM_USER': Keyword, - - 'TABLE': Keyword, - 'TABLE_NAME': Keyword, - ' TEMP': Keyword, - 'TEMPLATE': Keyword, - 'TEMPORARY': Keyword, - 'TERMINATE': Keyword, - 'THAN': Keyword, - 'THEN': Keyword, - 'TIMESTAMP': Keyword, - 'TIMEZONE_HOUR': Keyword, - 'TIMEZONE_MINUTE': Keyword, - 'TO': Keyword, - 'TOAST': Keyword, - 'TRAILING': Keyword, - 'TRANSATION': Keyword, - 'TRANSACTIONS_COMMITTED': Keyword, - 'TRANSACTIONS_ROLLED_BACK': Keyword, - 'TRANSATION_ACTIVE': Keyword, - 'TRANSFORM': Keyword, - 'TRANSFORMS': Keyword, - 'TRANSLATE': Keyword, - 'TRANSLATION': Keyword, - 'TREAT': Keyword, - 'TRIGGER': Keyword, - 'TRIGGER_CATALOG': Keyword, - 'TRIGGER_NAME': Keyword, - 'TRIGGER_SCHEMA': Keyword, - 'TRIM': Keyword, - 'TRUE': Keyword, - 'TRUNCATE': Keyword, - 'TRUSTED': Keyword, - 'TYPE': Keyword, - - 'UNCOMMITTED': Keyword, - 'UNDER': Keyword, - 'UNENCRYPTED': Keyword, - 'UNION': Keyword, - 'UNIQUE': Keyword, - 'UNKNOWN': Keyword, - 'UNLISTEN': Keyword, - 'UNNAMED': Keyword, - 'UNNEST': Keyword, - 'UNTIL': Keyword, - 'UPPER': Keyword, - 'USAGE': Keyword, - 'USER': Keyword, - 'USER_DEFINED_TYPE_CATALOG': Keyword, - 'USER_DEFINED_TYPE_NAME': Keyword, - 'USER_DEFINED_TYPE_SCHEMA': Keyword, - 'USING': Keyword, - - 'VACUUM': Keyword, - 'VALID': Keyword, - 'VALIDATOR': Keyword, - 'VALUES': Keyword, - 'VARIABLE': Keyword, - 'VERBOSE': Keyword, - 'VERSION': Keyword, - 'VIEW': Keyword, - 'VOLATILE': Keyword, - - 'WHEN': Keyword, - 'WHENEVER': Keyword, - 'WHERE': Keyword, - 'WITH': Keyword, - 'WITHOUT': Keyword, - 'WORK': Keyword, - 'WRITE': Keyword, - - 'YEAR': Keyword, - - 'ZONE': Keyword, - - - 'ARRAY': Name.Builtin, - 'BIGINT': Name.Builtin, - 'BINARY': Name.Builtin, - 'BIT': Name.Builtin, - 'BLOB': Name.Builtin, - 'BOOLEAN': Name.Builtin, - 'CHAR': Name.Builtin, - 'CHARACTER': Name.Builtin, - 'DATE': Name.Builtin, - 'DEC': Name.Builtin, - 'DECIMAL': Name.Builtin, - 'FLOAT': Name.Builtin, - 'INT': Name.Builtin, - 'INTEGER': Name.Builtin, - 'INTERVAL': Name.Builtin, - 'NUMBER': Name.Builtin, - 'NUMERIC': Name.Builtin, - 'REAL': Name.Builtin, - 'SERIAL': Name.Builtin, - 'SMALLINT': Name.Builtin, - 'VARCHAR': Name.Builtin, - 'VARYING': Name.Builtin, - 'INT8': Name.Builtin, - 'SERIAL8': Name.Builtin, - 'TEXT': Name.Builtin, + 'ABORT': tokens.Keyword, + 'ABS': tokens.Keyword, + 'ABSOLUTE': tokens.Keyword, + 'ACCESS': tokens.Keyword, + 'ADA': tokens.Keyword, + 'ADD': tokens.Keyword, + 'ADMIN': tokens.Keyword, + 'AFTER': tokens.Keyword, + 'AGGREGATE': tokens.Keyword, + 'ALIAS': tokens.Keyword, + 'ALL': tokens.Keyword, + 'ALLOCATE': tokens.Keyword, + 'ANALYSE': tokens.Keyword, + 'ANALYZE': tokens.Keyword, + 'ANY': tokens.Keyword, + 'ARE': tokens.Keyword, + 'ASC': tokens.Keyword, + 'ASENSITIVE': tokens.Keyword, + 'ASSERTION': tokens.Keyword, + 'ASSIGNMENT': tokens.Keyword, + 'ASYMMETRIC': tokens.Keyword, + 'AT': tokens.Keyword, + 'ATOMIC': tokens.Keyword, + 'AUTHORIZATION': tokens.Keyword, + 'AVG': 
tokens.Keyword, + + 'BACKWARD': tokens.Keyword, + 'BEFORE': tokens.Keyword, + 'BEGIN': tokens.Keyword, + 'BETWEEN': tokens.Keyword, + 'BITVAR': tokens.Keyword, + 'BIT_LENGTH': tokens.Keyword, + 'BOTH': tokens.Keyword, + 'BREADTH': tokens.Keyword, + +# 'C': tokens.Keyword, # most likely this is an alias + 'CACHE': tokens.Keyword, + 'CALL': tokens.Keyword, + 'CALLED': tokens.Keyword, + 'CARDINALITY': tokens.Keyword, + 'CASCADE': tokens.Keyword, + 'CASCADED': tokens.Keyword, + 'CAST': tokens.Keyword, + 'CATALOG': tokens.Keyword, + 'CATALOG_NAME': tokens.Keyword, + 'CHAIN': tokens.Keyword, + 'CHARACTERISTICS': tokens.Keyword, + 'CHARACTER_LENGTH': tokens.Keyword, + 'CHARACTER_SET_CATALOG': tokens.Keyword, + 'CHARACTER_SET_NAME': tokens.Keyword, + 'CHARACTER_SET_SCHEMA': tokens.Keyword, + 'CHAR_LENGTH': tokens.Keyword, + 'CHECK': tokens.Keyword, + 'CHECKED': tokens.Keyword, + 'CHECKPOINT': tokens.Keyword, + 'CLASS': tokens.Keyword, + 'CLASS_ORIGIN': tokens.Keyword, + 'CLOB': tokens.Keyword, + 'CLOSE': tokens.Keyword, + 'CLUSTER': tokens.Keyword, + 'COALSECE': tokens.Keyword, + 'COBOL': tokens.Keyword, + 'COLLATE': tokens.Keyword, + 'COLLATION': tokens.Keyword, + 'COLLATION_CATALOG': tokens.Keyword, + 'COLLATION_NAME': tokens.Keyword, + 'COLLATION_SCHEMA': tokens.Keyword, + 'COLUMN': tokens.Keyword, + 'COLUMN_NAME': tokens.Keyword, + 'COMMAND_FUNCTION': tokens.Keyword, + 'COMMAND_FUNCTION_CODE': tokens.Keyword, + 'COMMENT': tokens.Keyword, + 'COMMIT': tokens.Keyword, + 'COMMITTED': tokens.Keyword, + 'COMPLETION': tokens.Keyword, + 'CONDITION_NUMBER': tokens.Keyword, + 'CONNECT': tokens.Keyword, + 'CONNECTION': tokens.Keyword, + 'CONNECTION_NAME': tokens.Keyword, + 'CONSTRAINT': tokens.Keyword, + 'CONSTRAINTS': tokens.Keyword, + 'CONSTRAINT_CATALOG': tokens.Keyword, + 'CONSTRAINT_NAME': tokens.Keyword, + 'CONSTRAINT_SCHEMA': tokens.Keyword, + 'CONSTRUCTOR': tokens.Keyword, + 'CONTAINS': tokens.Keyword, + 'CONTINUE': tokens.Keyword, + 'CONVERSION': tokens.Keyword, + 'CONVERT': tokens.Keyword, + 'COPY': tokens.Keyword, + 'CORRESPONTING': tokens.Keyword, + 'COUNT': tokens.Keyword, + 'CREATEDB': tokens.Keyword, + 'CREATEUSER': tokens.Keyword, + 'CROSS': tokens.Keyword, + 'CUBE': tokens.Keyword, + 'CURRENT': tokens.Keyword, + 'CURRENT_DATE': tokens.Keyword, + 'CURRENT_PATH': tokens.Keyword, + 'CURRENT_ROLE': tokens.Keyword, + 'CURRENT_TIME': tokens.Keyword, + 'CURRENT_TIMESTAMP': tokens.Keyword, + 'CURRENT_USER': tokens.Keyword, + 'CURSOR': tokens.Keyword, + 'CURSOR_NAME': tokens.Keyword, + 'CYCLE': tokens.Keyword, + + 'DATA': tokens.Keyword, + 'DATABASE': tokens.Keyword, + 'DATETIME_INTERVAL_CODE': tokens.Keyword, + 'DATETIME_INTERVAL_PRECISION': tokens.Keyword, + 'DAY': tokens.Keyword, + 'DEALLOCATE': tokens.Keyword, + 'DECLARE': tokens.Keyword, + 'DEFAULT': tokens.Keyword, + 'DEFAULTS': tokens.Keyword, + 'DEFERRABLE': tokens.Keyword, + 'DEFERRED': tokens.Keyword, + 'DEFINED': tokens.Keyword, + 'DEFINER': tokens.Keyword, + 'DELIMITER': tokens.Keyword, + 'DELIMITERS': tokens.Keyword, + 'DEREF': tokens.Keyword, + 'DESC': tokens.Keyword, + 'DESCRIBE': tokens.Keyword, + 'DESCRIPTOR': tokens.Keyword, + 'DESTROY': tokens.Keyword, + 'DESTRUCTOR': tokens.Keyword, + 'DETERMINISTIC': tokens.Keyword, + 'DIAGNOSTICS': tokens.Keyword, + 'DICTIONARY': tokens.Keyword, + 'DISCONNECT': tokens.Keyword, + 'DISPATCH': tokens.Keyword, + 'DO': tokens.Keyword, + 'DOMAIN': tokens.Keyword, + 'DYNAMIC': tokens.Keyword, + 'DYNAMIC_FUNCTION': tokens.Keyword, + 'DYNAMIC_FUNCTION_CODE': tokens.Keyword, + + 'EACH': 
tokens.Keyword, + 'ENCODING': tokens.Keyword, + 'ENCRYPTED': tokens.Keyword, + 'END-EXEC': tokens.Keyword, + 'EQUALS': tokens.Keyword, + 'ESCAPE': tokens.Keyword, + 'EVERY': tokens.Keyword, + 'EXCEPT': tokens.Keyword, + 'ESCEPTION': tokens.Keyword, + 'EXCLUDING': tokens.Keyword, + 'EXCLUSIVE': tokens.Keyword, + 'EXEC': tokens.Keyword, + 'EXECUTE': tokens.Keyword, + 'EXISTING': tokens.Keyword, + 'EXISTS': tokens.Keyword, + 'EXTERNAL': tokens.Keyword, + 'EXTRACT': tokens.Keyword, + + 'FALSE': tokens.Keyword, + 'FETCH': tokens.Keyword, + 'FINAL': tokens.Keyword, + 'FIRST': tokens.Keyword, + 'FORCE': tokens.Keyword, + 'FOREIGN': tokens.Keyword, + 'FORTRAN': tokens.Keyword, + 'FORWARD': tokens.Keyword, + 'FOUND': tokens.Keyword, + 'FREE': tokens.Keyword, + 'FREEZE': tokens.Keyword, + 'FULL': tokens.Keyword, + 'FUNCTION': tokens.Keyword, + +# 'G': tokens.Keyword, + 'GENERAL': tokens.Keyword, + 'GENERATED': tokens.Keyword, + 'GET': tokens.Keyword, + 'GLOBAL': tokens.Keyword, + 'GO': tokens.Keyword, + 'GOTO': tokens.Keyword, + 'GRANT': tokens.Keyword, + 'GRANTED': tokens.Keyword, + 'GROUPING': tokens.Keyword, + + 'HANDLER': tokens.Keyword, + 'HAVING': tokens.Keyword, + 'HIERARCHY': tokens.Keyword, + 'HOLD': tokens.Keyword, + 'HOST': tokens.Keyword, + + 'IDENTITY': tokens.Keyword, + 'IGNORE': tokens.Keyword, + 'ILIKE': tokens.Keyword, + 'IMMEDIATE': tokens.Keyword, + 'IMMUTABLE': tokens.Keyword, + + 'IMPLEMENTATION': tokens.Keyword, + 'IMPLICIT': tokens.Keyword, + 'INCLUDING': tokens.Keyword, + 'INCREMENT': tokens.Keyword, + 'INDEX': tokens.Keyword, + + 'INDITCATOR': tokens.Keyword, + 'INFIX': tokens.Keyword, + 'INHERITS': tokens.Keyword, + 'INITIALIZE': tokens.Keyword, + 'INITIALLY': tokens.Keyword, + 'INOUT': tokens.Keyword, + 'INPUT': tokens.Keyword, + 'INSENSITIVE': tokens.Keyword, + 'INSTANTIABLE': tokens.Keyword, + 'INSTEAD': tokens.Keyword, + 'INTERSECT': tokens.Keyword, + 'INTO': tokens.Keyword, + 'INVOKER': tokens.Keyword, + 'IS': tokens.Keyword, + 'ISNULL': tokens.Keyword, + 'ISOLATION': tokens.Keyword, + 'ITERATE': tokens.Keyword, + +# 'K': tokens.Keyword, + 'KEY': tokens.Keyword, + 'KEY_MEMBER': tokens.Keyword, + 'KEY_TYPE': tokens.Keyword, + + 'LANCOMPILER': tokens.Keyword, + 'LANGUAGE': tokens.Keyword, + 'LARGE': tokens.Keyword, + 'LAST': tokens.Keyword, + 'LATERAL': tokens.Keyword, + 'LEADING': tokens.Keyword, + 'LENGTH': tokens.Keyword, + 'LESS': tokens.Keyword, + 'LEVEL': tokens.Keyword, + 'LIMIT': tokens.Keyword, + 'LISTEN': tokens.Keyword, + 'LOAD': tokens.Keyword, + 'LOCAL': tokens.Keyword, + 'LOCALTIME': tokens.Keyword, + 'LOCALTIMESTAMP': tokens.Keyword, + 'LOCATION': tokens.Keyword, + 'LOCATOR': tokens.Keyword, + 'LOCK': tokens.Keyword, + 'LOWER': tokens.Keyword, + +# 'M': tokens.Keyword, + 'MAP': tokens.Keyword, + 'MATCH': tokens.Keyword, + 'MAXVALUE': tokens.Keyword, + 'MESSAGE_LENGTH': tokens.Keyword, + 'MESSAGE_OCTET_LENGTH': tokens.Keyword, + 'MESSAGE_TEXT': tokens.Keyword, + 'METHOD': tokens.Keyword, + 'MINUTE': tokens.Keyword, + 'MINVALUE': tokens.Keyword, + 'MOD': tokens.Keyword, + 'MODE': tokens.Keyword, + 'MODIFIES': tokens.Keyword, + 'MODIFY': tokens.Keyword, + 'MONTH': tokens.Keyword, + 'MORE': tokens.Keyword, + 'MOVE': tokens.Keyword, + 'MUMPS': tokens.Keyword, + + 'NAMES': tokens.Keyword, + 'NATIONAL': tokens.Keyword, + 'NATURAL': tokens.Keyword, + 'NCHAR': tokens.Keyword, + 'NCLOB': tokens.Keyword, + 'NEW': tokens.Keyword, + 'NEXT': tokens.Keyword, + 'NO': tokens.Keyword, + 'NOCREATEDB': tokens.Keyword, + 'NOCREATEUSER': tokens.Keyword, + 'NONE': 
tokens.Keyword, + 'NOT': tokens.Keyword, + 'NOTHING': tokens.Keyword, + 'NOTIFY': tokens.Keyword, + 'NOTNULL': tokens.Keyword, + 'NULL': tokens.Keyword, + 'NULLABLE': tokens.Keyword, + 'NULLIF': tokens.Keyword, + + 'OBJECT': tokens.Keyword, + 'OCTET_LENGTH': tokens.Keyword, + 'OF': tokens.Keyword, + 'OFF': tokens.Keyword, + 'OFFSET': tokens.Keyword, + 'OIDS': tokens.Keyword, + 'OLD': tokens.Keyword, + 'ONLY': tokens.Keyword, + 'OPEN': tokens.Keyword, + 'OPERATION': tokens.Keyword, + 'OPERATOR': tokens.Keyword, + 'OPTION': tokens.Keyword, + 'OPTIONS': tokens.Keyword, + 'ORDINALITY': tokens.Keyword, + 'OUT': tokens.Keyword, + 'OUTPUT': tokens.Keyword, + 'OVERLAPS': tokens.Keyword, + 'OVERLAY': tokens.Keyword, + 'OVERRIDING': tokens.Keyword, + 'OWNER': tokens.Keyword, + + 'PAD': tokens.Keyword, + 'PARAMETER': tokens.Keyword, + 'PARAMETERS': tokens.Keyword, + 'PARAMETER_MODE': tokens.Keyword, + 'PARAMATER_NAME': tokens.Keyword, + 'PARAMATER_ORDINAL_POSITION': tokens.Keyword, + 'PARAMETER_SPECIFIC_CATALOG': tokens.Keyword, + 'PARAMETER_SPECIFIC_NAME': tokens.Keyword, + 'PARAMATER_SPECIFIC_SCHEMA': tokens.Keyword, + 'PARTIAL': tokens.Keyword, + 'PASCAL': tokens.Keyword, + 'PENDANT': tokens.Keyword, + 'PLACING': tokens.Keyword, + 'PLI': tokens.Keyword, + 'POSITION': tokens.Keyword, + 'POSTFIX': tokens.Keyword, + 'PRECISION': tokens.Keyword, + 'PREFIX': tokens.Keyword, + 'PREORDER': tokens.Keyword, + 'PREPARE': tokens.Keyword, + 'PRESERVE': tokens.Keyword, + 'PRIMARY': tokens.Keyword, + 'PRIOR': tokens.Keyword, + 'PRIVILEGES': tokens.Keyword, + 'PROCEDURAL': tokens.Keyword, + 'PROCEDURE': tokens.Keyword, + 'PUBLIC': tokens.Keyword, + + 'RAISE': tokens.Keyword, + 'READ': tokens.Keyword, + 'READS': tokens.Keyword, + 'RECHECK': tokens.Keyword, + 'RECURSIVE': tokens.Keyword, + 'REF': tokens.Keyword, + 'REFERENCES': tokens.Keyword, + 'REFERENCING': tokens.Keyword, + 'REINDEX': tokens.Keyword, + 'RELATIVE': tokens.Keyword, + 'RENAME': tokens.Keyword, + 'REPEATABLE': tokens.Keyword, + 'RESET': tokens.Keyword, + 'RESTART': tokens.Keyword, + 'RESTRICT': tokens.Keyword, + 'RESULT': tokens.Keyword, + 'RETURN': tokens.Keyword, + 'RETURNED_LENGTH': tokens.Keyword, + 'RETURNED_OCTET_LENGTH': tokens.Keyword, + 'RETURNED_SQLSTATE': tokens.Keyword, + 'RETURNS': tokens.Keyword, + 'REVOKE': tokens.Keyword, + 'RIGHT': tokens.Keyword, + 'ROLE': tokens.Keyword, + 'ROLLBACK': tokens.Keyword, + 'ROLLUP': tokens.Keyword, + 'ROUTINE': tokens.Keyword, + 'ROUTINE_CATALOG': tokens.Keyword, + 'ROUTINE_NAME': tokens.Keyword, + 'ROUTINE_SCHEMA': tokens.Keyword, + 'ROW': tokens.Keyword, + 'ROWS': tokens.Keyword, + 'ROW_COUNT': tokens.Keyword, + 'RULE': tokens.Keyword, + + 'SAVE_POINT': tokens.Keyword, + 'SCALE': tokens.Keyword, + 'SCHEMA': tokens.Keyword, + 'SCHEMA_NAME': tokens.Keyword, + 'SCOPE': tokens.Keyword, + 'SCROLL': tokens.Keyword, + 'SEARCH': tokens.Keyword, + 'SECOND': tokens.Keyword, + 'SECURITY': tokens.Keyword, + 'SELF': tokens.Keyword, + 'SENSITIVE': tokens.Keyword, + 'SERIALIZABLE': tokens.Keyword, + 'SERVER_NAME': tokens.Keyword, + 'SESSION': tokens.Keyword, + 'SESSION_USER': tokens.Keyword, + 'SETOF': tokens.Keyword, + 'SETS': tokens.Keyword, + 'SHARE': tokens.Keyword, + 'SHOW': tokens.Keyword, + 'SIMILAR': tokens.Keyword, + 'SIMPLE': tokens.Keyword, + 'SIZE': tokens.Keyword, + 'SOME': tokens.Keyword, + 'SOURCE': tokens.Keyword, + 'SPACE': tokens.Keyword, + 'SPECIFIC': tokens.Keyword, + 'SPECIFICTYPE': tokens.Keyword, + 'SPECIFIC_NAME': tokens.Keyword, + 'SQL': tokens.Keyword, + 'SQLCODE': tokens.Keyword, + 
'SQLERROR': tokens.Keyword, + 'SQLEXCEPTION': tokens.Keyword, + 'SQLSTATE': tokens.Keyword, + 'SQLWARNING': tokens.Keyword, + 'STABLE': tokens.Keyword, + 'START': tokens.Keyword, + 'STATE': tokens.Keyword, + 'STATEMENT': tokens.Keyword, + 'STATIC': tokens.Keyword, + 'STATISTICS': tokens.Keyword, + 'STDIN': tokens.Keyword, + 'STDOUT': tokens.Keyword, + 'STORAGE': tokens.Keyword, + 'STRICT': tokens.Keyword, + 'STRUCTURE': tokens.Keyword, + 'STYPE': tokens.Keyword, + 'SUBCLASS_ORIGIN': tokens.Keyword, + 'SUBLIST': tokens.Keyword, + 'SUBSTRING': tokens.Keyword, + 'SUM': tokens.Keyword, + 'SYMMETRIC': tokens.Keyword, + 'SYSID': tokens.Keyword, + 'SYSTEM': tokens.Keyword, + 'SYSTEM_USER': tokens.Keyword, + + 'TABLE': tokens.Keyword, + 'TABLE_NAME': tokens.Keyword, + ' TEMP': tokens.Keyword, + 'TEMPLATE': tokens.Keyword, + 'TEMPORARY': tokens.Keyword, + 'TERMINATE': tokens.Keyword, + 'THAN': tokens.Keyword, + 'TIMESTAMP': tokens.Keyword, + 'TIMEZONE_HOUR': tokens.Keyword, + 'TIMEZONE_MINUTE': tokens.Keyword, + 'TO': tokens.Keyword, + 'TOAST': tokens.Keyword, + 'TRAILING': tokens.Keyword, + 'TRANSATION': tokens.Keyword, + 'TRANSACTIONS_COMMITTED': tokens.Keyword, + 'TRANSACTIONS_ROLLED_BACK': tokens.Keyword, + 'TRANSATION_ACTIVE': tokens.Keyword, + 'TRANSFORM': tokens.Keyword, + 'TRANSFORMS': tokens.Keyword, + 'TRANSLATE': tokens.Keyword, + 'TRANSLATION': tokens.Keyword, + 'TREAT': tokens.Keyword, + 'TRIGGER': tokens.Keyword, + 'TRIGGER_CATALOG': tokens.Keyword, + 'TRIGGER_NAME': tokens.Keyword, + 'TRIGGER_SCHEMA': tokens.Keyword, + 'TRIM': tokens.Keyword, + 'TRUE': tokens.Keyword, + 'TRUNCATE': tokens.Keyword, + 'TRUSTED': tokens.Keyword, + 'TYPE': tokens.Keyword, + + 'UNCOMMITTED': tokens.Keyword, + 'UNDER': tokens.Keyword, + 'UNENCRYPTED': tokens.Keyword, + 'UNION': tokens.Keyword, + 'UNIQUE': tokens.Keyword, + 'UNKNOWN': tokens.Keyword, + 'UNLISTEN': tokens.Keyword, + 'UNNAMED': tokens.Keyword, + 'UNNEST': tokens.Keyword, + 'UNTIL': tokens.Keyword, + 'UPPER': tokens.Keyword, + 'USAGE': tokens.Keyword, + 'USER': tokens.Keyword, + 'USER_DEFINED_TYPE_CATALOG': tokens.Keyword, + 'USER_DEFINED_TYPE_NAME': tokens.Keyword, + 'USER_DEFINED_TYPE_SCHEMA': tokens.Keyword, + 'USING': tokens.Keyword, + + 'VACUUM': tokens.Keyword, + 'VALID': tokens.Keyword, + 'VALIDATOR': tokens.Keyword, + 'VALUES': tokens.Keyword, + 'VARIABLE': tokens.Keyword, + 'VERBOSE': tokens.Keyword, + 'VERSION': tokens.Keyword, + 'VIEW': tokens.Keyword, + 'VOLATILE': tokens.Keyword, + + 'WHENEVER': tokens.Keyword, + 'WITH': tokens.Keyword, + 'WITHOUT': tokens.Keyword, + 'WORK': tokens.Keyword, + 'WRITE': tokens.Keyword, + + 'YEAR': tokens.Keyword, + + 'ZONE': tokens.Keyword, + + + 'ARRAY': tokens.Name.Builtin, + 'BIGINT': tokens.Name.Builtin, + 'BINARY': tokens.Name.Builtin, + 'BIT': tokens.Name.Builtin, + 'BLOB': tokens.Name.Builtin, + 'BOOLEAN': tokens.Name.Builtin, + 'CHAR': tokens.Name.Builtin, + 'CHARACTER': tokens.Name.Builtin, + 'DATE': tokens.Name.Builtin, + 'DEC': tokens.Name.Builtin, + 'DECIMAL': tokens.Name.Builtin, + 'FLOAT': tokens.Name.Builtin, + 'INT': tokens.Name.Builtin, + 'INTEGER': tokens.Name.Builtin, + 'INTERVAL': tokens.Name.Builtin, + 'LONG': tokens.Name.Builtin, + 'NUMBER': tokens.Name.Builtin, + 'NUMERIC': tokens.Name.Builtin, + 'REAL': tokens.Name.Builtin, + 'SERIAL': tokens.Name.Builtin, + 'SMALLINT': tokens.Name.Builtin, + 'VARCHAR': tokens.Name.Builtin, + 'VARCHAR2': tokens.Name.Builtin, + 'VARYING': tokens.Name.Builtin, + 'INT8': tokens.Name.Builtin, + 'SERIAL8': tokens.Name.Builtin, + 'TEXT': 
tokens.Name.Builtin, } KEYWORDS_COMMON = { - 'SELECT': Keyword.DML, - 'INSERT': Keyword.DML, - 'DELETE': Keyword.DML, - 'UPDATE': Keyword.DML, - 'DROP': Keyword.DDL, - 'CREATE': Keyword.DDL, - 'ALTER': Keyword.DDL, - - 'WHERE': Keyword, - 'FROM': Keyword, - 'INNER': Keyword, - 'JOIN': Keyword, - 'AND': Keyword, - 'OR': Keyword, - 'LIKE': Keyword, - 'ON': Keyword, - 'IN': Keyword, - 'SET': Keyword, - - 'BY': Keyword, - 'GROUP': Keyword, - 'ORDER': Keyword, - 'LEFT': Keyword, - 'OUTER': Keyword, - - 'IF': Keyword, - 'END': Keyword, - 'THEN': Keyword, - 'LOOP': Keyword, - 'AS': Keyword, - 'ELSE': Keyword, - 'FOR': Keyword, - - 'CASE': Keyword, - 'WHEN': Keyword, - 'MIN': Keyword, - 'MAX': Keyword, - 'DISTINCT': Keyword, - + 'SELECT': tokens.Keyword.DML, + 'INSERT': tokens.Keyword.DML, + 'DELETE': tokens.Keyword.DML, + 'UPDATE': tokens.Keyword.DML, + 'REPLACE': tokens.Keyword.DML, + 'DROP': tokens.Keyword.DDL, + 'CREATE': tokens.Keyword.DDL, + 'ALTER': tokens.Keyword.DDL, + + 'WHERE': tokens.Keyword, + 'FROM': tokens.Keyword, + 'INNER': tokens.Keyword, + 'JOIN': tokens.Keyword, + 'AND': tokens.Keyword, + 'OR': tokens.Keyword, + 'LIKE': tokens.Keyword, + 'ON': tokens.Keyword, + 'IN': tokens.Keyword, + 'SET': tokens.Keyword, + + 'BY': tokens.Keyword, + 'GROUP': tokens.Keyword, + 'ORDER': tokens.Keyword, + 'LEFT': tokens.Keyword, + 'OUTER': tokens.Keyword, + + 'IF': tokens.Keyword, + 'END': tokens.Keyword, + 'THEN': tokens.Keyword, + 'LOOP': tokens.Keyword, + 'AS': tokens.Keyword, + 'ELSE': tokens.Keyword, + 'FOR': tokens.Keyword, + + 'CASE': tokens.Keyword, + 'WHEN': tokens.Keyword, + 'MIN': tokens.Keyword, + 'MAX': tokens.Keyword, + 'DISTINCT': tokens.Keyword, } diff --git a/debug_toolbar/utils/sqlparse/lexer.py b/debug_toolbar/utils/sqlparse/lexer.py index 727a4ff9d..ae3fc2e95 100644 --- a/debug_toolbar/utils/sqlparse/lexer.py +++ b/debug_toolbar/utils/sqlparse/lexer.py @@ -14,14 +14,14 @@ import re +from debug_toolbar.utils.sqlparse import tokens from debug_toolbar.utils.sqlparse.keywords import KEYWORDS, KEYWORDS_COMMON -from debug_toolbar.utils.sqlparse.tokens import * -from debug_toolbar.utils.sqlparse.tokens import _TokenType class include(str): pass + class combined(tuple): """Indicates a state combined from multiple states.""" @@ -32,9 +32,10 @@ def __init__(self, *args): # tuple.__init__ doesn't do anything pass + def is_keyword(value): test = value.upper() - return KEYWORDS_COMMON.get(test, KEYWORDS.get(test, Name)), value + return KEYWORDS_COMMON.get(test, KEYWORDS.get(test, tokens.Name)), value def apply_filters(stream, filters, lexer=None): @@ -43,9 +44,11 @@ def apply_filters(stream, filters, lexer=None): a stream. If lexer is given it's forwarded to the filter, otherwise the filter receives `None`. 
""" + def _apply(filter_, stream): for token in filter_.filter(lexer, stream): yield token + for filter_ in filters: stream = _apply(filter_, stream) return stream @@ -62,13 +65,14 @@ def _process_state(cls, unprocessed, processed, state): assert state[0] != '#', "invalid state name %r" % state if state in processed: return processed[state] - tokens = processed[state] = [] + tokenlist = processed[state] = [] rflags = cls.flags for tdef in unprocessed[state]: if isinstance(tdef, include): # it's a state reference assert tdef != state, "circular state reference %r" % state - tokens.extend(cls._process_state(unprocessed, processed, str(tdef))) + tokenlist.extend(cls._process_state( + unprocessed, processed, str(tdef))) continue assert type(tdef) is tuple, "wrong rule def %r" % tdef @@ -76,11 +80,13 @@ def _process_state(cls, unprocessed, processed, state): try: rex = re.compile(tdef[0], rflags).match except Exception, err: - raise ValueError("uncompilable regex %r in state %r of %r: %s" % - (tdef[0], state, cls, err)) + raise ValueError(("uncompilable regex %r in state" + " %r of %r: %s" + % (tdef[0], state, cls, err))) - assert type(tdef[1]) is _TokenType or callable(tdef[1]), \ - 'token type must be simple type or callable, not %r' % (tdef[1],) + assert type(tdef[1]) is tokens._TokenType or callable(tdef[1]), \ + ('token type must be simple type or callable, not %r' + % (tdef[1],)) if len(tdef) == 2: new_state = None @@ -104,7 +110,8 @@ def _process_state(cls, unprocessed, processed, state): cls._tmpname += 1 itokens = [] for istate in tdef2: - assert istate != state, 'circular state ref %r' % istate + assert istate != state, \ + 'circular state ref %r' % istate itokens.extend(cls._process_state(unprocessed, processed, istate)) processed[new_state] = itokens @@ -118,8 +125,8 @@ def _process_state(cls, unprocessed, processed, state): new_state = tdef2 else: assert False, 'unknown new state def %r' % tdef2 - tokens.append((rex, tdef[1], new_state)) - return tokens + tokenlist.append((rex, tdef[1], new_state)) + return tokenlist def process_tokendef(cls): cls._all_tokens = {} @@ -143,9 +150,7 @@ def __call__(cls, *args, **kwds): return type.__call__(cls, *args, **kwds) - - -class Lexer: +class Lexer(object): __metaclass__ = LexerMeta @@ -157,41 +162,53 @@ class Lexer: tokens = { 'root': [ - (r'--.*?(\r|\n|\r\n)', Comment.Single), - (r'(\r|\n|\r\n)', Newline), - (r'\s+', Whitespace), - (r'/\*', Comment.Multiline, 'multiline-comments'), - (r':=', Assignment), - (r'::', Punctuation), - (r'[*]', Wildcard), - (r"`(``|[^`])*`", Name), - (r"´(´´|[^´])*´", Name), - (r'@[a-zA-Z_][a-zA-Z0-9_]+', Name), - (r'[+/<>=~!@#%^&|`?^-]', Operator), - (r'[0-9]+', Number.Integer), + (r'--.*?(\r\n|\r|\n)', tokens.Comment.Single), + # $ matches *before* newline, therefore we have two patterns + # to match Comment.Single + (r'--.*?$', tokens.Comment.Single), + (r'(\r|\n|\r\n)', tokens.Newline), + (r'\s+', tokens.Whitespace), + (r'/\*', tokens.Comment.Multiline, 'multiline-comments'), + (r':=', tokens.Assignment), + (r'::', tokens.Punctuation), + (r'[*]', tokens.Wildcard), + (r'CASE\b', tokens.Keyword), # extended CASE(foo) + (r"`(``|[^`])*`", tokens.Name), + (r"´(´´|[^´])*´", tokens.Name), + (r'\$([a-zA-Z_][a-zA-Z0-9_]*)?\$', tokens.Name.Builtin), + (r'\?{1}', tokens.Name.Placeholder), + (r'[$:?%][a-zA-Z0-9_]+[^$:?%]?', tokens.Name.Placeholder), + (r'@[a-zA-Z_][a-zA-Z0-9_]+', tokens.Name), + (r'[a-zA-Z_][a-zA-Z0-9_]*(?=[.(])', tokens.Name), # see issue39 + (r'[<>=~!]+', tokens.Operator.Comparison), + 
(r'[+/@#%^&|`?^-]+', tokens.Operator), + (r'0x[0-9a-fA-F]+', tokens.Number.Hexadecimal), + (r'[0-9]*\.[0-9]+', tokens.Number.Float), + (r'[0-9]+', tokens.Number.Integer), # TODO: Backslash escapes? - (r"'(''|[^'])*'", String.Single), - (r'"(""|[^"])*"', String.Symbol), # not a real string literal in ANSI SQL - (r'(LEFT |RIGHT )?(INNER |OUTER )?JOIN', Keyword), - (r'END( IF| LOOP)?', Keyword), - (r'CREATE( OR REPLACE)?', Keyword.DDL), + (r"(''|'.*?[^\\]')", tokens.String.Single), + # not a real string literal in ANSI SQL: + (r'(""|".*?[^\\]")', tokens.String.Symbol), + (r'(\[.*[^\]]\])', tokens.Name), + (r'(LEFT |RIGHT )?(INNER |OUTER )?JOIN\b', tokens.Keyword), + (r'END( IF| LOOP)?\b', tokens.Keyword), + (r'NOT NULL\b', tokens.Keyword), + (r'CREATE( OR REPLACE)?\b', tokens.Keyword.DDL), (r'[a-zA-Z_][a-zA-Z0-9_]*', is_keyword), - (r'\$([a-zA-Z_][a-zA-Z0-9_]*)?\$', Name.Builtin), - (r'[;:()\[\],\.]', Punctuation), + (r'[;:()\[\],\.]', tokens.Punctuation), ], 'multiline-comments': [ - (r'/\*', Comment.Multiline, 'multiline-comments'), - (r'\*/', Comment.Multiline, '#pop'), - (r'[^/\*]+', Comment.Multiline), - (r'[/*]', Comment.Multiline) - ] - } + (r'/\*', tokens.Comment.Multiline, 'multiline-comments'), + (r'\*/', tokens.Comment.Multiline, '#pop'), + (r'[^/\*]+', tokens.Comment.Multiline), + (r'[/*]', tokens.Comment.Multiline) + ]} def __init__(self): self.filters = [] def add_filter(self, filter_, **options): - from sqlparse.filters import Filter + from debug_toolbar.utils.sqlparse.filters import Filter if not isinstance(filter_, Filter): filter_ = filter_(**options) self.filters.append(filter_) @@ -241,7 +258,6 @@ def streamer(): stream = apply_filters(stream, self.filters, self) return stream - def get_tokens_unprocessed(self, text, stack=('root',)): """ Split ``text`` into (tokentype, text) pairs. @@ -261,7 +277,7 @@ def get_tokens_unprocessed(self, text, stack=('root',)): value = m.group() if value in known_names: yield pos, known_names[value], value - elif type(action) is _TokenType: + elif type(action) is tokens._TokenType: yield pos, action, value elif hasattr(action, '__call__'): ttype, value = action(value) @@ -297,9 +313,9 @@ def get_tokens_unprocessed(self, text, stack=('root',)): pos += 1 statestack = ['root'] statetokens = tokendefs['root'] - yield pos, Text, u'\n' + yield pos, tokens.Text, u'\n' continue - yield pos, Error, text[pos] + yield pos, tokens.Error, text[pos] pos += 1 except IndexError: break diff --git a/debug_toolbar/utils/sqlparse/sql.py b/debug_toolbar/utils/sqlparse/sql.py index 5bbb9773e..55bf804b9 100644 --- a/debug_toolbar/utils/sqlparse/sql.py +++ b/debug_toolbar/utils/sqlparse/sql.py @@ -3,7 +3,6 @@ """This module contains classes representing syntactical elements of SQL.""" import re -import types from debug_toolbar.utils.sqlparse import tokens as T @@ -16,14 +15,15 @@ class Token(object): the type of the token. """ - __slots__ = ('value', 'ttype',) + __slots__ = ('value', 'ttype', 'parent') def __init__(self, ttype, value): self.value = value self.ttype = ttype + self.parent = None def __str__(self): - return unicode(self).encode('latin-1') + return unicode(self).encode('utf-8') def __repr__(self): short = self._get_repr_value() @@ -43,7 +43,7 @@ def _get_repr_name(self): def _get_repr_value(self): raw = unicode(self) if len(raw) > 7: - short = raw[:6]+u'...' + short = raw[:6] + u'...' else: short = raw return re.sub('\s+', ' ', short) @@ -59,12 +59,12 @@ def match(self, ttype, values, regex=False): type. 
*values* is a list of possible values for this token. The values are OR'ed together so if only one of the values matches ``True`` - is returned. Except for keyword tokens the comparsion is + is returned. Except for keyword tokens the comparison is case-sensitive. For convenience it's ok to pass in a single string. If *regex* is ``True`` (default is ``False``) the given values are treated as regular expressions. """ - type_matched = self.ttype in ttype + type_matched = self.ttype is ttype if not type_matched or values is None: return type_matched if isinstance(values, basestring): @@ -79,7 +79,7 @@ def match(self, ttype, values, regex=False): return True return False else: - if self.ttype is T.Keyword: + if self.ttype in T.Keyword: values = set([v.upper() for v in values]) return self.value.upper() in values else: @@ -93,6 +93,32 @@ def is_whitespace(self): """Return ``True`` if this token is a whitespace token.""" return self.ttype and self.ttype in T.Whitespace + def within(self, group_cls): + """Returns ``True`` if this token is within *group_cls*. + + Use this method for example to check if an identifier is within + a function: ``t.within(sql.Function)``. + """ + parent = self.parent + while parent: + if isinstance(parent, group_cls): + return True + parent = parent.parent + return False + + def is_child_of(self, other): + """Returns ``True`` if this token is a direct child of *other*.""" + return self.parent == other + + def has_ancestor(self, other): + """Returns ``True`` if *other* is in this tokens ancestry.""" + parent = self.parent + while parent: + if parent == other: + return True + parent = parent.parent + return False + class TokenList(Token): """A group of tokens. @@ -113,24 +139,24 @@ def __unicode__(self): return ''.join(unicode(x) for x in self.flatten()) def __str__(self): - return unicode(self).encode('latin-1') + return unicode(self).encode('utf-8') def _get_repr_name(self): return self.__class__.__name__ - ## def _pprint_tree(self, max_depth=None, depth=0): - ## """Pretty-print the object tree.""" - ## indent = ' '*(depth*2) - ## for token in self.tokens: - ## if token.is_group(): - ## pre = ' | ' - ## else: - ## pre = ' | ' - ## print '%s%s%s \'%s\'' % (indent, pre, token._get_repr_name(), - ## token._get_repr_value()) - ## if (token.is_group() and max_depth is not None - ## and depth < max_depth): - ## token._pprint_tree(max_depth, depth+1) + def _pprint_tree(self, max_depth=None, depth=0): + """Pretty-print the object tree.""" + indent = ' '*(depth*2) + for idx, token in enumerate(self.tokens): + if token.is_group(): + pre = ' +-' + else: + pre = ' | ' + print '%s%s%d %s \'%s\'' % (indent, pre, idx, + token._get_repr_name(), + token._get_repr_value()) + if (token.is_group() and (max_depth is None or depth < max_depth)): + token._pprint_tree(max_depth, depth+1) def flatten(self): """Generator yielding ungrouped tokens. @@ -150,6 +176,10 @@ def is_group(self): def get_sublists(self): return [x for x in self.tokens if isinstance(x, TokenList)] + @property + def _groupable_tokens(self): + return self.tokens + def token_first(self, ignore_whitespace=True): """Returns the first child token. 
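
A reading aid for the parent-tracking changes in this file: group_tokens() now sets each grouped token's .parent to the new group, and the added within(), is_child_of(), and has_ancestor() helpers walk that chain upward. The following is a minimal sketch, not part of the patch, assuming the vendored modules import as shown:

    from debug_toolbar.utils.sqlparse import sql
    from debug_toolbar.utils.sqlparse import tokens as T

    # Build a tiny token tree by hand; only grouping assigns .parent.
    name = sql.Token(T.Name, u'foo')
    punct = sql.Token(T.Punctuation, u';')
    stmt = sql.TokenList([name, punct])
    func = stmt.group_tokens(sql.Function, [name])  # name.parent -> func, func.parent -> stmt

    print name.within(sql.Function)   # True: within() walks .parent upward
    print name.has_ancestor(stmt)     # True: stmt sits above func in the ancestry
    print punct.within(sql.Function)  # False: never grouped, so its .parent is still None
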
@@ -190,7 +220,7 @@ def token_next_by_type(self, idx, ttypes): def token_next_match(self, idx, ttype, value, regex=False): """Returns next token where it's ``match`` method returns ``True``.""" - if type(idx) != types.IntType: + if not isinstance(idx, int): idx = self.token_index(idx) for token in self.tokens[idx:]: if token.match(ttype, value, regex): @@ -202,8 +232,8 @@ def token_not_matching(self, idx, funcs): passed = False for func in funcs: if func(token): - passed = True - break + passed = True + break if not passed: return token return None @@ -241,7 +271,7 @@ def token_next(self, idx, skip_ws=True): return None if not isinstance(idx, int): idx = self.token_index(idx) - while idx < len(self.tokens)-1: + while idx < len(self.tokens) - 1: idx += 1 if self.tokens[idx].is_whitespace() and skip_ws: continue @@ -257,18 +287,27 @@ def tokens_between(self, start, end, exclude_end=False): If *exclude_end* is ``True`` (default is ``False``) the end token is included too. """ + # FIXME(andi): rename exclude_end to inlcude_end if exclude_end: offset = 0 else: offset = 1 - return self.tokens[self.token_index(start):self.token_index(end)+offset] + end_idx = self.token_index(end) + offset + start_idx = self.token_index(start) + return self.tokens[start_idx:end_idx] - def group_tokens(self, grp_cls, tokens): + def group_tokens(self, grp_cls, tokens, ignore_ws=False): """Replace tokens by an instance of *grp_cls*.""" idx = self.token_index(tokens[0]) + if ignore_ws: + while tokens and tokens[-1].is_whitespace(): + tokens = tokens[:-1] for t in tokens: self.tokens.remove(t) grp = grp_cls(tokens) + for token in tokens: + token.parent = grp + grp.parent = self self.tokens.insert(idx, grp) return grp @@ -290,7 +329,11 @@ def get_type(self): isn't a DML or DDL keyword "UNKNOWN" is returned. """ first_token = self.token_first() - if first_token.ttype in (T.Keyword.DML, T.Keyword.DDL): + if first_token is None: + # An "empty" statement that either has not tokens at all + # or only whitespace tokens. + return 'UNKNOWN' + elif first_token.ttype in (T.Keyword.DML, T.Keyword.DDL): return first_token.value.upper() else: return 'UNKNOWN' @@ -397,27 +440,36 @@ class Parenthesis(TokenList): """Tokens between parenthesis.""" __slots__ = ('value', 'ttype', 'tokens') + @property + def _groupable_tokens(self): + return self.tokens[1:-1] + class Assignment(TokenList): """An assignment like 'var := val;'""" __slots__ = ('value', 'ttype', 'tokens') + class If(TokenList): """An 'if' clause with possible 'else if' or 'else' parts.""" __slots__ = ('value', 'ttype', 'tokens') + class For(TokenList): """A 'FOR' loop.""" __slots__ = ('value', 'ttype', 'tokens') -class Comparsion(TokenList): - """A comparsion used for example in WHERE clauses.""" + +class Comparison(TokenList): + """A comparison used for example in WHERE clauses.""" __slots__ = ('value', 'ttype', 'tokens') + class Comment(TokenList): """A comment.""" __slots__ = ('value', 'ttype', 'tokens') + class Where(TokenList): """A WHERE clause.""" __slots__ = ('value', 'ttype', 'tokens') @@ -434,9 +486,12 @@ def get_cases(self): If an ELSE exists condition is None. 
""" ret = [] - in_condition = in_value = False + in_value = False + in_condition = True for token in self.tokens: - if token.match(T.Keyword, 'WHEN'): + if token.match(T.Keyword, 'CASE'): + continue + elif token.match(T.Keyword, 'WHEN'): ret.append(([], [])) in_condition = True in_value = False @@ -450,8 +505,25 @@ def get_cases(self): elif token.match(T.Keyword, 'END'): in_condition = False in_value = False + if (in_condition or in_value) and not ret: + # First condition withou preceding WHEN + ret.append(([], [])) if in_condition: ret[-1][0].append(token) elif in_value: ret[-1][1].append(token) return ret + + +class Function(TokenList): + """A function or procedure call.""" + + __slots__ = ('value', 'ttype', 'tokens') + + def get_parameters(self): + """Return a list of parameters.""" + parenthesis = self.tokens[-1] + for t in parenthesis.tokens: + if isinstance(t, IdentifierList): + return t.get_identifiers() + return [] diff --git a/debug_toolbar/utils/sqlparse/tokens.py b/debug_toolbar/utils/sqlparse/tokens.py index 2c63c4177..01a9b896e 100644 --- a/debug_toolbar/utils/sqlparse/tokens.py +++ b/debug_toolbar/utils/sqlparse/tokens.py @@ -9,11 +9,6 @@ """Tokens""" -try: - set -except NameError: - from sets import Set as set - class _TokenType(tuple): parent = None @@ -27,22 +22,14 @@ def split(self): buf.reverse() return buf - def __init__(self, *args): - # no need to call super.__init__ - self.subtypes = set() - def __contains__(self, val): - return self is val or ( - type(val) is self.__class__ and - val[:len(self)] == self - ) + return val is not None and (self is val or val[:len(self)] == self) def __getattr__(self, val): if not val or not val[0].isupper(): return tuple.__getattribute__(self, val) new = _TokenType(self + (val,)) setattr(self, val, new) - self.subtypes.add(new) new.parent = self return new @@ -53,30 +40,31 @@ def __repr__(self): return 'Token' + (self and '.' or '') + '.'.join(self) -Token = _TokenType() +Token = _TokenType() # Special token types -Text = Token.Text -Whitespace = Text.Whitespace -Newline = Whitespace.Newline -Error = Token.Error +Text = Token.Text +Whitespace = Text.Whitespace +Newline = Whitespace.Newline +Error = Token.Error # Text that doesn't belong to this lexer (e.g. HTML in PHP) -Other = Token.Other +Other = Token.Other # Common token types for source code -Keyword = Token.Keyword -Name = Token.Name -Literal = Token.Literal -String = Literal.String -Number = Literal.Number +Keyword = Token.Keyword +Name = Token.Name +Literal = Token.Literal +String = Literal.String +Number = Literal.Number Punctuation = Token.Punctuation -Operator = Token.Operator -Wildcard = Token.Wildcard -Comment = Token.Comment -Assignment = Token.Assignement +Operator = Token.Operator +Comparison = Operator.Comparison +Wildcard = Token.Wildcard +Comment = Token.Comment +Assignment = Token.Assignement # Generic types for non-source code -Generic = Token.Generic +Generic = Token.Generic # String and some others are not direct childs of Token. # alias them: @@ -93,39 +81,3 @@ def __repr__(self): Group.Parenthesis = Token.Group.Parenthesis Group.Comment = Token.Group.Comment Group.Where = Token.Group.Where - - -def is_token_subtype(ttype, other): - """ - Return True if ``ttype`` is a subtype of ``other``. - - exists for backwards compatibility. use ``ttype in other`` now. 
- """ - return ttype in other - - -def string_to_tokentype(s): - """ - Convert a string into a token type:: - - >>> string_to_token('String.Double') - Token.Literal.String.Double - >>> string_to_token('Token.Literal.Number') - Token.Literal.Number - >>> string_to_token('') - Token - - Tokens that are already tokens are returned unchanged: - - >>> string_to_token(String) - Token.Literal.String - """ - if isinstance(s, _TokenType): - return s - if not s: - return Token - node = Token - for item in s.split('.'): - node = getattr(node, item) - return node - From 6606b10870e16e5eadcc88f67564da492b4b6e08 Mon Sep 17 00:00:00 2001 From: Olexiy Strashko Date: Sat, 13 Aug 2011 19:28:44 +0300 Subject: [PATCH 0011/1758] Fixed opened issue #193 (exception on windows when working with file path): - change '/' path separator usage to os.sep --- debug_toolbar/panels/profiling.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/debug_toolbar/panels/profiling.py b/debug_toolbar/panels/profiling.py index b3dedf8f3..289e1f769 100644 --- a/debug_toolbar/panels/profiling.py +++ b/debug_toolbar/panels/profiling.py @@ -16,6 +16,7 @@ import cProfile from pstats import Stats from colorsys import hsv_to_rgb +import os class DjangoDebugToolbarStats(Stats): __root = None @@ -64,7 +65,7 @@ def func_std_string(self): # match what old profile produced if idx > -1: file_name=file_name[idx+14:] - file_path, file_name = file_name.rsplit('/', 1) + file_path, file_name = file_name.rsplit(os.sep, 1) return mark_safe('{0}/{1} in {3}({2})'.format( file_path, From d55d8192dff37c9c025e9d97813d40f2081d69d7 Mon Sep 17 00:00:00 2001 From: Jason Keene Date: Sat, 20 Aug 2011 01:03:05 -0400 Subject: [PATCH 0012/1758] Included jQuery breaks newer jQuery plugins, force noConflict for jQuery global object as well as $. --- debug_toolbar/media/debug_toolbar/js/toolbar.js | 2 +- debug_toolbar/media/debug_toolbar/js/toolbar.min.js | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/debug_toolbar/media/debug_toolbar/js/toolbar.js b/debug_toolbar/media/debug_toolbar/js/toolbar.js index 947242ff9..3d667694d 100644 --- a/debug_toolbar/media/debug_toolbar/js/toolbar.js +++ b/debug_toolbar/media/debug_toolbar/js/toolbar.js @@ -205,4 +205,4 @@ window.djdt = (function(window, document, jQuery) { djdt.init(); }); return djdt; -}(window, document, jQuery.noConflict())); +}(window, document, jQuery.noConflict(true))); diff --git a/debug_toolbar/media/debug_toolbar/js/toolbar.min.js b/debug_toolbar/media/debug_toolbar/js/toolbar.min.js index 137c0f41d..4f843e218 100644 --- a/debug_toolbar/media/debug_toolbar/js/toolbar.min.js +++ b/debug_toolbar/media/debug_toolbar/js/toolbar.min.js @@ -20,4 +20,4 @@ * Released under the MIT, BSD, and GPL Licenses. 
 * More information: http://sizzlejs.com/
 */
[The remainder of toolbar.min.js, one enormous minified jQuery/Sizzle line mangled by extraction (its HTML string literals were stripped) and truncated mid-line, is elided here. Per the @@ -20,4 +20,4 @@ hunk header and the toolbar.js change above, the only edit replaces jQuery.noConflict() with jQuery.noConflict(true) at the end of the minified file.]