client.py 135 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
7327832793280328132823283328432853286328732883289329032913292329332943295329632973298329933003301330233033304330533063307330833093310331133123313331433153316331733183319332033213322332333243325332633273328332933303331333233333334333533363337333833393340334133423343334433453346334733483349335033513352335333543355335633573358335933603361336233633364336533663367336833693370337133723373337433753376337733783379338033813382338333843385338633873388338933903391339233933394339533963397339833993400340134023403340434053406340734083409341034113412341334143415341634173418341934203421342234233424342534263427342834293430343134323433343434353436343734383439344034413442344334443445344634473448344934503451345234533454345534563457345834593460346134623463346434653466346734683469347034713472347334743475347634773478347934803481348234833484348534863487348834893490349134923493349434953496349734983499350035013502350335043505350635073508350935103511351235133514351535163517351835193520352135223523352435253526352735283529353035313532353335343535353635373538353935403541354235433544354535463547354835493550355135523553355435553556355735583559356035613562356335643565356635673568356935703571357235733574357535763577357835793580358135823583358435853586358735883589359035913592359335943595359635973598359936003601
  1. from __future__ import unicode_literals
  2. from itertools import chain
  3. import datetime
  4. import sys
  5. import warnings
  6. import time
  7. import threading
  8. import time as mod_time
  9. import hashlib
  10. from redis._compat import (basestring, bytes, imap, iteritems, iterkeys,
  11. itervalues, izip, long, nativestr, safe_unicode)
  12. from redis.connection import (ConnectionPool, UnixDomainSocketConnection,
  13. SSLConnection, Token)
  14. from redis.lock import Lock
  15. from redis.exceptions import (
  16. ConnectionError,
  17. DataError,
  18. ExecAbortError,
  19. NoScriptError,
  20. PubSubError,
  21. RedisError,
  22. ResponseError,
  23. TimeoutError,
  24. WatchError,
  25. )
# Empty byte string, reused when joining command fragments.
SYM_EMPTY = b''
# Marker key placed in a command's options dict; presumably flags commands
# whose empty reply is acceptable (usage not visible in this chunk — verify).
EMPTY_RESPONSE = 'EMPTY_RESPONSE'
  28. def list_or_args(keys, args):
  29. # returns a single new list combining keys and args
  30. try:
  31. iter(keys)
  32. # a string or bytes instance can be iterated, but indicates
  33. # keys wasn't passed as a list
  34. if isinstance(keys, (basestring, bytes)):
  35. keys = [keys]
  36. else:
  37. keys = list(keys)
  38. except TypeError:
  39. keys = [keys]
  40. if args:
  41. keys.extend(args)
  42. return keys
  43. def timestamp_to_datetime(response):
  44. "Converts a unix timestamp to a Python datetime object"
  45. if not response:
  46. return None
  47. try:
  48. response = int(response)
  49. except ValueError:
  50. return None
  51. return datetime.datetime.fromtimestamp(response)
  52. def string_keys_to_dict(key_string, callback):
  53. return dict.fromkeys(key_string.split(), callback)
  54. def dict_merge(*dicts):
  55. merged = {}
  56. for d in dicts:
  57. merged.update(d)
  58. return merged
  59. def parse_debug_object(response):
  60. "Parse the results of Redis's DEBUG OBJECT command into a Python dict"
  61. # The 'type' of the object is the first item in the response, but isn't
  62. # prefixed with a name
  63. response = nativestr(response)
  64. response = 'type:' + response
  65. response = dict(kv.split(':') for kv in response.split())
  66. # parse some expected int values from the string response
  67. # note: this cmd isn't spec'd so these may not appear in all redis versions
  68. int_fields = ('refcount', 'serializedlength', 'lru', 'lru_seconds_idle')
  69. for field in int_fields:
  70. if field in response:
  71. response[field] = int(response[field])
  72. return response
  73. def parse_object(response, infotype):
  74. "Parse the results of an OBJECT command"
  75. if infotype in ('idletime', 'refcount'):
  76. return int_or_none(response)
  77. return response
def parse_info(response):
    "Parse the result of Redis's INFO command into a Python dict"
    info = {}
    response = nativestr(response)

    def get_value(value):
        # Scalar value: attempt numeric coercion, falling back to the raw
        # string when it isn't a number.
        if ',' not in value or '=' not in value:
            try:
                if '.' in value:
                    return float(value)
                else:
                    return int(value)
            except ValueError:
                return value
        else:
            # Composite value such as "a=1,b=2": build a sub-dict,
            # recursing so each piece gets the same numeric coercion.
            sub_dict = {}
            for item in value.split(','):
                k, v = item.rsplit('=', 1)
                sub_dict[k] = get_value(v)
            return sub_dict

    for line in response.splitlines():
        # '#' lines are section headers; skip them and blank lines.
        if line and not line.startswith('#'):
            if line.find(':') != -1:
                # support keys that include ':' by using rsplit
                key, value = line.rsplit(':', 1)
                info[key] = get_value(value)
            else:
                # if the line isn't splittable, append it to the "__raw__" key
                info.setdefault('__raw__', []).append(line)
    return info
# Coercions applied to SENTINEL state replies by pairs_to_dict_typed():
# every field listed here arrives as a string but represents an integer.
SENTINEL_STATE_TYPES = {
    'can-failover-its-master': int,
    'config-epoch': int,
    'down-after-milliseconds': int,
    'failover-timeout': int,
    'info-refresh': int,
    'last-hello-message': int,
    'last-ok-ping-reply': int,
    'last-ping-reply': int,
    'last-ping-sent': int,
    'master-link-down-time': int,
    'master-port': int,
    'num-other-sentinels': int,
    'num-slaves': int,
    'o-down-time': int,
    'pending-commands': int,
    'parallel-syncs': int,
    'port': int,
    'quorum': int,
    'role-reported-time': int,
    's-down-time': int,
    'slave-priority': int,
    'slave-repl-offset': int,
    'voted-leader-epoch': int
}
  132. def parse_sentinel_state(item):
  133. result = pairs_to_dict_typed(item, SENTINEL_STATE_TYPES)
  134. flags = set(result['flags'].split(','))
  135. for name, flag in (('is_master', 'master'), ('is_slave', 'slave'),
  136. ('is_sdown', 's_down'), ('is_odown', 'o_down'),
  137. ('is_sentinel', 'sentinel'),
  138. ('is_disconnected', 'disconnected'),
  139. ('is_master_down', 'master_down')):
  140. result[name] = flag in flags
  141. return result
  142. def parse_sentinel_master(response):
  143. return parse_sentinel_state(imap(nativestr, response))
  144. def parse_sentinel_masters(response):
  145. result = {}
  146. for item in response:
  147. state = parse_sentinel_state(imap(nativestr, item))
  148. result[state['name']] = state
  149. return result
  150. def parse_sentinel_slaves_and_sentinels(response):
  151. return [parse_sentinel_state(imap(nativestr, item)) for item in response]
  152. def parse_sentinel_get_master(response):
  153. return response and (response[0], int(response[1])) or None
  154. def pairs_to_dict(response, decode_keys=False):
  155. "Create a dict given a list of key/value pairs"
  156. if decode_keys:
  157. # the iter form is faster, but I don't know how to make that work
  158. # with a nativestr() map
  159. return dict(izip(imap(nativestr, response[::2]), response[1::2]))
  160. else:
  161. it = iter(response)
  162. return dict(izip(it, it))
  163. def pairs_to_dict_typed(response, type_info):
  164. it = iter(response)
  165. result = {}
  166. for key, value in izip(it, it):
  167. if key in type_info:
  168. try:
  169. value = type_info[key](value)
  170. except Exception:
  171. # if for some reason the value can't be coerced, just use
  172. # the string value
  173. pass
  174. result[key] = value
  175. return result
  176. def zset_score_pairs(response, **options):
  177. """
  178. If ``withscores`` is specified in the options, return the response as
  179. a list of (value, score) pairs
  180. """
  181. if not response or not options.get('withscores'):
  182. return response
  183. score_cast_func = options.get('score_cast_func', float)
  184. it = iter(response)
  185. return list(izip(it, imap(score_cast_func, it)))
  186. def sort_return_tuples(response, **options):
  187. """
  188. If ``groups`` is specified, return the response as a list of
  189. n-element tuples with n being the value found in options['groups']
  190. """
  191. if not response or not options.get('groups'):
  192. return response
  193. n = options['groups']
  194. return list(izip(*[response[i::n] for i in range(n)]))
  195. def int_or_none(response):
  196. if response is None:
  197. return None
  198. return int(response)
  199. def parse_stream_list(response):
  200. if response is None:
  201. return None
  202. return [(r[0], pairs_to_dict(r[1])) for r in response]
def pairs_to_dict_with_nativestr_keys(response):
    # Convenience wrapper: like pairs_to_dict() but with the keys decoded
    # to native strings.
    return pairs_to_dict(response, decode_keys=True)
def parse_list_of_dicts(response):
    # Each item in the reply is itself a flat key/value list; decode each
    # one into a dict with native-string keys.
    return list(imap(pairs_to_dict_with_nativestr_keys, response))
  207. def parse_xclaim(response, **options):
  208. if options.get('parse_justid', False):
  209. return response
  210. return parse_stream_list(response)
  211. def parse_xinfo_stream(response):
  212. data = pairs_to_dict(response, decode_keys=True)
  213. first = data['first-entry']
  214. data['first-entry'] = (first[0], pairs_to_dict(first[1]))
  215. last = data['last-entry']
  216. data['last-entry'] = (last[0], pairs_to_dict(last[1]))
  217. return data
  218. def parse_xread(response):
  219. if response is None:
  220. return []
  221. return [[nativestr(r[0]), parse_stream_list(r[1])] for r in response]
  222. def parse_xpending(response, **options):
  223. if options.get('parse_detail', False):
  224. return parse_xpending_range(response)
  225. consumers = [{'name': n, 'pending': long(p)} for n, p in response[3] or []]
  226. return {
  227. 'pending': response[0],
  228. 'min': response[1],
  229. 'max': response[2],
  230. 'consumers': consumers
  231. }
  232. def parse_xpending_range(response):
  233. k = ('message_id', 'consumer', 'time_since_delivered', 'times_delivered')
  234. return [dict(izip(k, r)) for r in response]
  235. def float_or_none(response):
  236. if response is None:
  237. return None
  238. return float(response)
def bool_ok(response):
    # True when the server replied with the simple string +OK.
    return nativestr(response) == 'OK'
  241. def parse_zadd(response, **options):
  242. if options.get('as_score'):
  243. return float(response)
  244. return int(response)
  245. def parse_client_list(response, **options):
  246. clients = []
  247. for c in nativestr(response).splitlines():
  248. # Values might contain '='
  249. clients.append(dict(pair.split('=', 1) for pair in c.split(' ')))
  250. return clients
  251. def parse_config_get(response, **options):
  252. response = [nativestr(i) if i is not None else None for i in response]
  253. return response and pairs_to_dict(response) or {}
def parse_scan(response, **options):
    # SCAN/SSCAN reply: [next-cursor, [items...]]. The cursor is coerced
    # with long() (py2 compat shim) since it can exceed the int range there.
    cursor, r = response
    return long(cursor), r
def parse_hscan(response, **options):
    # HSCAN reply: [next-cursor, flat field/value list]; an empty batch
    # yields {}.
    cursor, r = response
    return long(cursor), r and pairs_to_dict(r) or {}
def parse_zscan(response, **options):
    # ZSCAN reply: [next-cursor, flat member/score list]. Scores are passed
    # through score_cast_func (float by default); the lazy izip/imap pair
    # pulls member then score alternately from the shared iterator.
    score_cast_func = options.get('score_cast_func', float)
    cursor, r = response
    it = iter(r)
    return long(cursor), list(izip(it, imap(score_cast_func, it)))
  265. def parse_slowlog_get(response, **options):
  266. return [{
  267. 'id': item[0],
  268. 'start_time': int(item[1]),
  269. 'duration': int(item[2]),
  270. 'command': b' '.join(item[3])
  271. } for item in response]
  272. def parse_cluster_info(response, **options):
  273. response = nativestr(response)
  274. return dict(line.split(':') for line in response.splitlines() if line)
  275. def _parse_node_line(line):
  276. line_items = line.split(' ')
  277. node_id, addr, flags, master_id, ping, pong, epoch, \
  278. connected = line.split(' ')[:8]
  279. slots = [sl.split('-') for sl in line_items[8:]]
  280. node_dict = {
  281. 'node_id': node_id,
  282. 'flags': flags,
  283. 'master_id': master_id,
  284. 'last_ping_sent': ping,
  285. 'last_pong_rcvd': pong,
  286. 'epoch': epoch,
  287. 'slots': slots,
  288. 'connected': True if connected == 'connected' else False
  289. }
  290. return addr, node_dict
  291. def parse_cluster_nodes(response, **options):
  292. response = nativestr(response)
  293. raw_lines = response
  294. if isinstance(response, basestring):
  295. raw_lines = response.splitlines()
  296. return dict(_parse_node_line(line) for line in raw_lines)
  297. def parse_georadius_generic(response, **options):
  298. if options['store'] or options['store_dist']:
  299. # `store` and `store_diff` cant be combined
  300. # with other command arguments.
  301. return response
  302. if type(response) != list:
  303. response_list = [response]
  304. else:
  305. response_list = response
  306. if not options['withdist'] and not options['withcoord']\
  307. and not options['withhash']:
  308. # just a bunch of places
  309. return [nativestr(r) for r in response_list]
  310. cast = {
  311. 'withdist': float,
  312. 'withcoord': lambda ll: (float(ll[0]), float(ll[1])),
  313. 'withhash': int
  314. }
  315. # zip all output results with each casting functino to get
  316. # the properly native Python value.
  317. f = [nativestr]
  318. f += [cast[o] for o in ['withdist', 'withhash', 'withcoord'] if options[o]]
  319. return [
  320. list(map(lambda fv: fv[0](fv[1]), zip(f, r))) for r in response_list
  321. ]
  322. def parse_pubsub_numsub(response, **options):
  323. return list(zip(response[0::2], response[1::2]))
class Redis(object):
    """
    Implementation of the Redis protocol.

    This abstract class provides a Python interface to all Redis commands
    and an implementation of the Redis protocol.

    Connection and Pipeline derive from this, implementing how
    the commands are sent and received to the Redis server.
    """
    # Maps command names to callables that convert the raw wire reply into
    # the Python value handed back to the caller. __init__ copies this dict
    # per instance, so instance-level overrides don't leak across clients.
    RESPONSE_CALLBACKS = dict_merge(
        string_keys_to_dict(
            'AUTH EXPIRE EXPIREAT HEXISTS HMSET MOVE MSETNX PERSIST '
            'PSETEX RENAMENX SISMEMBER SMOVE SETEX SETNX',
            bool
        ),
        string_keys_to_dict(
            'BITCOUNT BITPOS DECRBY DEL EXISTS GEOADD GETBIT HDEL HLEN '
            'HSTRLEN INCRBY LINSERT LLEN LPUSHX PFADD PFCOUNT RPUSHX SADD '
            'SCARD SDIFFSTORE SETBIT SETRANGE SINTERSTORE SREM STRLEN '
            'SUNIONSTORE UNLINK XACK XDEL XLEN XTRIM ZCARD ZLEXCOUNT ZREM '
            'ZREMRANGEBYLEX ZREMRANGEBYRANK ZREMRANGEBYSCORE',
            int
        ),
        string_keys_to_dict(
            'INCRBYFLOAT HINCRBYFLOAT',
            float
        ),
        string_keys_to_dict(
            # these return OK, or int if redis-server is >=1.3.4
            'LPUSH RPUSH',
            lambda r: isinstance(r, (long, int)) and r or nativestr(r) == 'OK'
        ),
        string_keys_to_dict('SORT', sort_return_tuples),
        string_keys_to_dict('ZSCORE ZINCRBY GEODIST', float_or_none),
        string_keys_to_dict(
            'FLUSHALL FLUSHDB LSET LTRIM MSET PFMERGE RENAME '
            'SAVE SELECT SHUTDOWN SLAVEOF SWAPDB WATCH UNWATCH',
            bool_ok
        ),
        string_keys_to_dict('BLPOP BRPOP', lambda r: r and tuple(r) or None),
        string_keys_to_dict(
            'SDIFF SINTER SMEMBERS SUNION',
            lambda r: r and set(r) or set()
        ),
        string_keys_to_dict(
            'ZPOPMAX ZPOPMIN ZRANGE ZRANGEBYSCORE ZREVRANGE ZREVRANGEBYSCORE',
            zset_score_pairs
        ),
        # blocking pops return (key, member, score) with a float score
        string_keys_to_dict(
            'BZPOPMIN BZPOPMAX',
            lambda r: r and (r[0], r[1], float(r[2])) or None
        ),
        string_keys_to_dict('ZRANK ZREVRANK', int_or_none),
        string_keys_to_dict('XREVRANGE XRANGE', parse_stream_list),
        string_keys_to_dict('XREAD XREADGROUP', parse_xread),
        string_keys_to_dict('BGREWRITEAOF BGSAVE', lambda r: True),
        {
            'CLIENT GETNAME': lambda r: r and nativestr(r),
            'CLIENT ID': int,
            'CLIENT KILL': bool_ok,
            'CLIENT LIST': parse_client_list,
            'CLIENT SETNAME': bool_ok,
            'CLIENT UNBLOCK': lambda r: r and int(r) == 1 or False,
            'CLIENT PAUSE': bool_ok,
            'CLUSTER ADDSLOTS': bool_ok,
            'CLUSTER COUNT-FAILURE-REPORTS': lambda x: int(x),
            'CLUSTER COUNTKEYSINSLOT': lambda x: int(x),
            'CLUSTER DELSLOTS': bool_ok,
            'CLUSTER FAILOVER': bool_ok,
            'CLUSTER FORGET': bool_ok,
            'CLUSTER INFO': parse_cluster_info,
            'CLUSTER KEYSLOT': lambda x: int(x),
            'CLUSTER MEET': bool_ok,
            'CLUSTER NODES': parse_cluster_nodes,
            'CLUSTER REPLICATE': bool_ok,
            'CLUSTER RESET': bool_ok,
            'CLUSTER SAVECONFIG': bool_ok,
            'CLUSTER SET-CONFIG-EPOCH': bool_ok,
            'CLUSTER SETSLOT': bool_ok,
            'CLUSTER SLAVES': parse_cluster_nodes,
            'CONFIG GET': parse_config_get,
            'CONFIG RESETSTAT': bool_ok,
            'CONFIG SET': bool_ok,
            'DEBUG OBJECT': parse_debug_object,
            'GEOHASH': lambda r: list(map(nativestr, r)),
            'GEOPOS': lambda r: list(map(lambda ll: (float(ll[0]),
                                                     float(ll[1]))
                                         if ll is not None else None, r)),
            'GEORADIUS': parse_georadius_generic,
            'GEORADIUSBYMEMBER': parse_georadius_generic,
            'HGETALL': lambda r: r and pairs_to_dict(r) or {},
            'HSCAN': parse_hscan,
            'INFO': parse_info,
            'LASTSAVE': timestamp_to_datetime,
            'MEMORY PURGE': bool_ok,
            'MEMORY USAGE': int_or_none,
            'OBJECT': parse_object,
            'PING': lambda r: nativestr(r) == 'PONG',
            'PUBSUB NUMSUB': parse_pubsub_numsub,
            'RANDOMKEY': lambda r: r and r or None,
            'SCAN': parse_scan,
            'SCRIPT EXISTS': lambda r: list(imap(bool, r)),
            'SCRIPT FLUSH': bool_ok,
            'SCRIPT KILL': bool_ok,
            'SCRIPT LOAD': nativestr,
            'SENTINEL GET-MASTER-ADDR-BY-NAME': parse_sentinel_get_master,
            'SENTINEL MASTER': parse_sentinel_master,
            'SENTINEL MASTERS': parse_sentinel_masters,
            'SENTINEL MONITOR': bool_ok,
            'SENTINEL REMOVE': bool_ok,
            'SENTINEL SENTINELS': parse_sentinel_slaves_and_sentinels,
            'SENTINEL SET': bool_ok,
            'SENTINEL SLAVES': parse_sentinel_slaves_and_sentinels,
            'SET': lambda r: r and nativestr(r) == 'OK',
            'SLOWLOG GET': parse_slowlog_get,
            'SLOWLOG LEN': int,
            'SLOWLOG RESET': bool_ok,
            'SSCAN': parse_scan,
            'TIME': lambda x: (int(x[0]), int(x[1])),
            'XCLAIM': parse_xclaim,
            'XGROUP CREATE': bool_ok,
            'XGROUP DELCONSUMER': int,
            'XGROUP DESTROY': bool,
            'XGROUP SETID': bool_ok,
            'XINFO CONSUMERS': parse_list_of_dicts,
            'XINFO GROUPS': parse_list_of_dicts,
            'XINFO STREAM': parse_xinfo_stream,
            'XPENDING': parse_xpending,
            'ZADD': parse_zadd,
            'ZSCAN': parse_zscan,
        }
    )
    @classmethod
    def from_url(cls, url, db=None, **kwargs):
        """
        Return a Redis client object configured from the given URL

        For example::

            redis://[:password]@localhost:6379/0
            rediss://[:password]@localhost:6379/0
            unix://[:password]@/path/to/socket.sock?db=0

        Three URL schemes are supported:

        - ```redis://``
          <http://www.iana.org/assignments/uri-schemes/prov/redis>`_ creates a
          normal TCP socket connection
        - ```rediss://``
          <http://www.iana.org/assignments/uri-schemes/prov/rediss>`_ creates a
          SSL wrapped TCP socket connection
        - ``unix://`` creates a Unix Domain Socket connection

        There are several ways to specify a database number. The parse function
        will return the first specified option:

        1. A ``db`` querystring option, e.g. redis://localhost?db=0
        2. If using the redis:// scheme, the path argument of the url, e.g.
           redis://localhost/0
        3. The ``db`` argument to this function.

        If none of these options are specified, db=0 is used.

        Any additional querystring arguments and keyword arguments will be
        passed along to the ConnectionPool class's initializer. In the case
        of conflicting arguments, querystring arguments always win.
        """
        # All URL parsing/validation is delegated to the pool's constructor.
        connection_pool = ConnectionPool.from_url(url, db=db, **kwargs)
        return cls(connection_pool=connection_pool)
    def __init__(self, host='localhost', port=6379,
                 db=0, password=None, socket_timeout=None,
                 socket_connect_timeout=None,
                 socket_keepalive=None, socket_keepalive_options=None,
                 connection_pool=None, unix_socket_path=None,
                 encoding='utf-8', encoding_errors='strict',
                 charset=None, errors=None,
                 decode_responses=False, retry_on_timeout=False,
                 ssl=False, ssl_keyfile=None, ssl_certfile=None,
                 ssl_cert_reqs='required', ssl_ca_certs=None,
                 max_connections=None):
        """
        Create a Redis client.

        If ``connection_pool`` is supplied, it is used as-is and every other
        connection-related argument is ignored. Otherwise a new
        ConnectionPool is constructed from the remaining arguments.
        """
        if not connection_pool:
            # ``charset``/``errors`` are deprecated aliases for
            # ``encoding``/``encoding_errors``; honor them with a warning.
            if charset is not None:
                warnings.warn(DeprecationWarning(
                    '"charset" is deprecated. Use "encoding" instead'))
                encoding = charset
            if errors is not None:
                warnings.warn(DeprecationWarning(
                    '"errors" is deprecated. Use "encoding_errors" instead'))
                encoding_errors = errors
            kwargs = {
                'db': db,
                'password': password,
                'socket_timeout': socket_timeout,
                'encoding': encoding,
                'encoding_errors': encoding_errors,
                'decode_responses': decode_responses,
                'retry_on_timeout': retry_on_timeout,
                'max_connections': max_connections
            }
            # based on input, setup appropriate connection args
            if unix_socket_path is not None:
                # Unix domain socket: host/port/SSL options do not apply.
                kwargs.update({
                    'path': unix_socket_path,
                    'connection_class': UnixDomainSocketConnection
                })
            else:
                # TCP specific options
                kwargs.update({
                    'host': host,
                    'port': port,
                    'socket_connect_timeout': socket_connect_timeout,
                    'socket_keepalive': socket_keepalive,
                    'socket_keepalive_options': socket_keepalive_options,
                })
                if ssl:
                    # SSL is layered on top of the TCP connection class.
                    kwargs.update({
                        'connection_class': SSLConnection,
                        'ssl_keyfile': ssl_keyfile,
                        'ssl_certfile': ssl_certfile,
                        'ssl_cert_reqs': ssl_cert_reqs,
                        'ssl_ca_certs': ssl_ca_certs,
                    })
            connection_pool = ConnectionPool(**kwargs)
        self.connection_pool = connection_pool
        # Per-instance copy so set_response_callback() never mutates the
        # class-level callback table shared by other clients.
        self.response_callbacks = self.__class__.RESPONSE_CALLBACKS.copy()
  538. def __repr__(self):
  539. return "%s<%s>" % (type(self).__name__, repr(self.connection_pool))
  540. def set_response_callback(self, command, callback):
  541. "Set a custom Response Callback"
  542. self.response_callbacks[command] = callback
    def pipeline(self, transaction=True, shard_hint=None):
        """
        Return a new pipeline object that can queue multiple commands for
        later execution. ``transaction`` indicates whether all commands
        should be executed atomically. Apart from making a group of operations
        atomic, pipelines are useful for reducing the back-and-forth overhead
        between the client and server.
        """
        # The pipeline shares this client's pool and (live) callback table.
        return Pipeline(
            self.connection_pool,
            self.response_callbacks,
            transaction,
            shard_hint)
    def transaction(self, func, *watches, **kwargs):
        """
        Convenience method for executing the callable `func` as a transaction
        while watching all keys specified in `watches`. The 'func' callable
        should expect a single argument which is a Pipeline object.

        Keyword options: ``shard_hint`` is forwarded to the pipeline;
        ``value_from_callable`` returns `func`'s return value instead of the
        EXEC result; ``watch_delay`` sleeps that many seconds between retries.
        """
        shard_hint = kwargs.pop('shard_hint', None)
        value_from_callable = kwargs.pop('value_from_callable', False)
        watch_delay = kwargs.pop('watch_delay', None)
        with self.pipeline(True, shard_hint) as pipe:
            # Retry the whole transaction whenever a watched key changes
            # (signalled by WatchError from EXEC).
            while True:
                try:
                    if watches:
                        pipe.watch(*watches)
                    func_value = func(pipe)
                    exec_value = pipe.execute()
                    return func_value if value_from_callable else exec_value
                except WatchError:
                    if watch_delay is not None and watch_delay > 0:
                        time.sleep(watch_delay)
                    continue
    def lock(self, name, timeout=None, sleep=0.1, blocking_timeout=None,
             lock_class=None, thread_local=True):
        """
        Return a new Lock object using key ``name`` that mimics
        the behavior of threading.Lock.

        If specified, ``timeout`` indicates a maximum life for the lock.
        By default, it will remain locked until release() is called.

        ``sleep`` indicates the amount of time to sleep per loop iteration
        when the lock is in blocking mode and another client is currently
        holding the lock.

        ``blocking_timeout`` indicates the maximum amount of time in seconds to
        spend trying to acquire the lock. A value of ``None`` indicates
        continue trying forever. ``blocking_timeout`` can be specified as a
        float or integer, both representing the number of seconds to wait.

        ``lock_class`` forces the specified lock implementation.

        ``thread_local`` indicates whether the lock token is placed in
        thread-local storage. By default, the token is placed in thread local
        storage so that a thread only sees its token, not a token set by
        another thread. Consider the following timeline:

            time: 0, thread-1 acquires `my-lock`, with a timeout of 5 seconds.
                     thread-1 sets the token to "abc"
            time: 1, thread-2 blocks trying to acquire `my-lock` using the
                     Lock instance.
            time: 5, thread-1 has not yet completed. redis expires the lock
                     key.
            time: 5, thread-2 acquired `my-lock` now that it's available.
                     thread-2 sets the token to "xyz"
            time: 6, thread-1 finishes its work and calls release(). if the
                     token is *not* stored in thread local storage, then
                     thread-1 would see the token value as "xyz" and would be
                     able to successfully release the thread-2's lock.

        In some use cases it's necessary to disable thread local storage. For
        example, if you have code where one thread acquires a lock and passes
        that lock instance to a worker thread to release later. If thread
        local storage isn't disabled in this case, the worker thread won't see
        the token set by the thread that acquired the lock. Our assumption
        is that these cases aren't common and as such default to using
        thread local storage.
        """
        if lock_class is None:
            # Default to the standard Lua-based Lock implementation.
            lock_class = Lock
        return lock_class(self, name, timeout=timeout, sleep=sleep,
                          blocking_timeout=blocking_timeout,
                          thread_local=thread_local)
    def pubsub(self, **kwargs):
        """
        Return a Publish/Subscribe object. With this object, you can
        subscribe to channels and listen for messages that get published to
        them.
        """
        return PubSub(self.connection_pool, **kwargs)
  627. # COMMAND EXECUTION AND PROTOCOL PARSING
    def execute_command(self, *args, **options):
        "Execute a command and return a parsed response"
        pool = self.connection_pool
        command_name = args[0]
        connection = pool.get_connection(command_name, **options)
        try:
            connection.send_command(*args)
            return self.parse_response(connection, command_name, **options)
        except (ConnectionError, TimeoutError) as e:
            # Retry exactly once on a freshly reconnected socket. Timeouts
            # are only retried when the connection opted in via
            # ``retry_on_timeout``; connection errors are always retried.
            connection.disconnect()
            if not connection.retry_on_timeout and isinstance(e, TimeoutError):
                raise
            connection.send_command(*args)
            return self.parse_response(connection, command_name, **options)
        finally:
            # Always return the connection to the pool, even on failure.
            pool.release(connection)
    def parse_response(self, connection, command_name, **options):
        "Parses a response from the Redis server"
        try:
            response = connection.read_response()
        except ResponseError:
            # Some callers (e.g. MGET with zero keys) map a server error to
            # a well-known empty value instead of raising.
            if EMPTY_RESPONSE in options:
                return options[EMPTY_RESPONSE]
            raise
        if command_name in self.response_callbacks:
            return self.response_callbacks[command_name](response, **options)
        return response
  655. # SERVER INFORMATION
    def bgrewriteaof(self):
        "Tell the Redis server to rewrite the AOF file from data in memory."
        return self.execute_command('BGREWRITEAOF')

    def bgsave(self):
        """
        Tell the Redis server to save its data to disk. Unlike save(),
        this method is asynchronous and returns immediately.
        """
        return self.execute_command('BGSAVE')

    def client_kill(self, address):
        "Disconnects the client at ``address`` (ip:port)"
        return self.execute_command('CLIENT KILL', address)
  668. def client_list(self, _type=None):
  669. """
  670. Returns a list of currently connected clients.
  671. If type of client specified, only that type will be returned.
  672. :param _type: optional. one of the client types (normal, master,
  673. replica, pubsub)
  674. """
  675. "Returns a list of currently connected clients"
  676. if _type is not None:
  677. client_types = ('normal', 'master', 'replica', 'pubsub')
  678. if str(_type).lower() not in client_types:
  679. raise DataError("CLIENT LIST _type must be one of %r" % (
  680. client_types,))
  681. return self.execute_command('CLIENT LIST', Token.get_token('TYPE'),
  682. _type)
  683. return self.execute_command('CLIENT LIST')
    def client_getname(self):
        "Returns the current connection name"
        return self.execute_command('CLIENT GETNAME')

    def client_id(self):
        "Returns the current connection id"
        return self.execute_command('CLIENT ID')

    def client_setname(self, name):
        "Sets the current connection name"
        return self.execute_command('CLIENT SETNAME', name)

    def client_unblock(self, client_id, error=False):
        """
        Unblocks a connection by its client id.
        If ``error`` is True, unblocks the client with a special error message.
        If ``error`` is False (default), the client is unblocked using the
        regular timeout mechanism.
        """
        args = ['CLIENT UNBLOCK', int(client_id)]
        if error:
            args.append(Token.get_token('ERROR'))
        return self.execute_command(*args)
    def client_pause(self, timeout):
        """
        Suspend all the Redis clients for the specified amount of time
        :param timeout: milliseconds to pause clients
        """
        # ``long`` comes from the compat layer for Python 2 support.
        if not isinstance(timeout, (int, long)):
            raise DataError("CLIENT PAUSE timeout must be an integer")
        return self.execute_command('CLIENT PAUSE', str(timeout))
    def config_get(self, pattern="*"):
        "Return a dictionary of configuration based on the ``pattern``"
        return self.execute_command('CONFIG GET', pattern)

    def config_set(self, name, value):
        "Set config item ``name`` with ``value``"
        return self.execute_command('CONFIG SET', name, value)

    def config_resetstat(self):
        "Reset runtime statistics"
        return self.execute_command('CONFIG RESETSTAT')

    def config_rewrite(self):
        "Rewrite config file with the minimal change to reflect running config"
        return self.execute_command('CONFIG REWRITE')

    def dbsize(self):
        "Returns the number of keys in the current database"
        return self.execute_command('DBSIZE')

    def debug_object(self, key):
        "Returns version specific meta information about a given key"
        return self.execute_command('DEBUG OBJECT', key)

    def echo(self, value):
        "Echo the string back from the server"
        return self.execute_command('ECHO', value)
  733. def flushall(self, asynchronous=False):
  734. """
  735. Delete all keys in all databases on the current host.
  736. ``asynchronous`` indicates whether the operation is
  737. executed asynchronously by the server.
  738. """
  739. args = []
  740. if asynchronous:
  741. args.append(Token.get_token('ASYNC'))
  742. return self.execute_command('FLUSHALL', *args)
  743. def flushdb(self, asynchronous=False):
  744. """
  745. Delete all keys in the current database.
  746. ``asynchronous`` indicates whether the operation is
  747. executed asynchronously by the server.
  748. """
  749. args = []
  750. if asynchronous:
  751. args.append(Token.get_token('ASYNC'))
  752. return self.execute_command('FLUSHDB', *args)
    def swapdb(self, first, second):
        "Swap two databases"
        return self.execute_command('SWAPDB', first, second)

    def info(self, section=None):
        """
        Returns a dictionary containing information about the Redis server

        The ``section`` option can be used to select a specific section
        of information

        The section option is not supported by older versions of Redis Server,
        and will generate ResponseError
        """
        if section is None:
            return self.execute_command('INFO')
        else:
            return self.execute_command('INFO', section)

    def lastsave(self):
        """
        Return a Python datetime object representing the last time the
        Redis database was saved to disk
        """
        return self.execute_command('LASTSAVE')
    def migrate(self, host, port, keys, destination_db, timeout,
                copy=False, replace=False, auth=None):
        """
        Migrate 1 or more keys from the current Redis server to a different
        server specified by the ``host``, ``port`` and ``destination_db``.

        The ``timeout``, specified in milliseconds, indicates the maximum
        time the connection between the two servers can be idle before the
        command is interrupted.

        If ``copy`` is True, the specified ``keys`` are NOT deleted from
        the source server.

        If ``replace`` is True, this operation will overwrite the keys
        on the destination server if they exist.

        If ``auth`` is specified, authenticate to the destination server with
        the password provided.
        """
        keys = list_or_args(keys, [])
        if not keys:
            raise DataError('MIGRATE requires at least one key')
        pieces = []
        if copy:
            pieces.append(Token.get_token('COPY'))
        if replace:
            pieces.append(Token.get_token('REPLACE'))
        if auth:
            pieces.append(Token.get_token('AUTH'))
            pieces.append(auth)
        # The KEYS variant is always used; the empty string stands in for
        # the single-key positional argument of the plain MIGRATE form.
        pieces.append(Token.get_token('KEYS'))
        pieces.extend(keys)
        return self.execute_command('MIGRATE', host, port, '', destination_db,
                                    timeout, *pieces)
    def object(self, infotype, key):
        "Return the encoding, idletime, or refcount about the key"
        # ``infotype`` is also passed as a parse option so the OBJECT
        # response callback knows which subcommand was issued.
        return self.execute_command('OBJECT', infotype, key, infotype=infotype)

    def memory_usage(self, key, samples=None):
        """
        Return the total memory usage for key, its value and associated
        administrative overheads.

        For nested data structures, ``samples`` is the number of elements to
        sample. If left unspecified, the server's default is 5. Use 0 to sample
        all elements.
        """
        args = []
        if isinstance(samples, int):
            args.extend([Token.get_token('SAMPLES'), samples])
        return self.execute_command('MEMORY USAGE', key, *args)
    def memory_purge(self):
        "Attempts to purge dirty pages for reclamation by allocator"
        return self.execute_command('MEMORY PURGE')

    def ping(self):
        "Ping the Redis server"
        return self.execute_command('PING')

    def save(self):
        """
        Tell the Redis server to save its data to disk,
        blocking until the save is complete
        """
        return self.execute_command('SAVE')
    def sentinel(self, *args):
        "Redis Sentinel's SENTINEL command."
        # Deprecated no-op: warns and returns None.
        warnings.warn(
            DeprecationWarning('Use the individual sentinel_* methods'))

    def sentinel_get_master_addr_by_name(self, service_name):
        "Returns a (host, port) pair for the given ``service_name``"
        return self.execute_command('SENTINEL GET-MASTER-ADDR-BY-NAME',
                                    service_name)

    def sentinel_master(self, service_name):
        "Returns a dictionary containing the specified masters state."
        return self.execute_command('SENTINEL MASTER', service_name)

    def sentinel_masters(self):
        "Returns a list of dictionaries containing each master's state."
        return self.execute_command('SENTINEL MASTERS')

    def sentinel_monitor(self, name, ip, port, quorum):
        "Add a new master to Sentinel to be monitored"
        return self.execute_command('SENTINEL MONITOR', name, ip, port, quorum)

    def sentinel_remove(self, name):
        "Remove a master from Sentinel's monitoring"
        return self.execute_command('SENTINEL REMOVE', name)

    def sentinel_sentinels(self, service_name):
        "Returns a list of sentinels for ``service_name``"
        return self.execute_command('SENTINEL SENTINELS', service_name)

    def sentinel_set(self, name, option, value):
        "Set Sentinel monitoring parameters for a given master"
        return self.execute_command('SENTINEL SET', name, option, value)

    def sentinel_slaves(self, service_name):
        "Returns a list of slaves for ``service_name``"
        return self.execute_command('SENTINEL SLAVES', service_name)
    def shutdown(self, save=False, nosave=False):
        """Shutdown the Redis server. If Redis has persistence configured,
        data will be flushed before shutdown. If the "save" option is set,
        a data flush will be attempted even if there is no persistence
        configured. If the "nosave" option is set, no data flush will be
        attempted. The "save" and "nosave" options cannot both be set.
        """
        if save and nosave:
            raise DataError('SHUTDOWN save and nosave cannot both be set')
        args = ['SHUTDOWN']
        if save:
            args.append('SAVE')
        if nosave:
            args.append('NOSAVE')
        try:
            self.execute_command(*args)
        except ConnectionError:
            # a ConnectionError here is expected: the server closes our
            # connection as it shuts down.
            return
        # Getting a reply back means the server did not shut down.
        raise RedisError("SHUTDOWN seems to have failed.")
    def slaveof(self, host=None, port=None):
        """
        Set the server to be a replicated slave of the instance identified
        by the ``host`` and ``port``. If called without arguments, the
        instance is promoted to a master instead.
        """
        if host is None and port is None:
            # SLAVEOF NO ONE promotes this server to master.
            return self.execute_command('SLAVEOF', Token.get_token('NO'),
                                        Token.get_token('ONE'))
        return self.execute_command('SLAVEOF', host, port)
    def slowlog_get(self, num=None):
        """
        Get the entries from the slowlog. If ``num`` is specified, get the
        most recent ``num`` items.
        """
        args = ['SLOWLOG GET']
        if num is not None:
            args.append(num)
        return self.execute_command(*args)

    def slowlog_len(self):
        "Get the number of items in the slowlog"
        return self.execute_command('SLOWLOG LEN')

    def slowlog_reset(self):
        "Remove all items in the slowlog"
        return self.execute_command('SLOWLOG RESET')

    def time(self):
        """
        Returns the server time as a 2-item tuple of ints:
        (seconds since epoch, microseconds into this second).
        """
        return self.execute_command('TIME')
    def wait(self, num_replicas, timeout):
        """
        Redis synchronous replication
        That returns the number of replicas that processed the query when
        we finally have at least ``num_replicas``, or when the ``timeout`` was
        reached.
        """
        return self.execute_command('WAIT', num_replicas, timeout)

    # BASIC KEY COMMANDS
    def append(self, key, value):
        """
        Appends the string ``value`` to the value at ``key``. If ``key``
        doesn't already exist, create it with a value of ``value``.
        Returns the new length of the value at ``key``.
        """
        return self.execute_command('APPEND', key, value)
  927. def bitcount(self, key, start=None, end=None):
  928. """
  929. Returns the count of set bits in the value of ``key``. Optional
  930. ``start`` and ``end`` paramaters indicate which bytes to consider
  931. """
  932. params = [key]
  933. if start is not None and end is not None:
  934. params.append(start)
  935. params.append(end)
  936. elif (start is not None and end is None) or \
  937. (end is not None and start is None):
  938. raise DataError("Both start and end must be specified")
  939. return self.execute_command('BITCOUNT', *params)
    def bitfield(self, key, default_overflow=None):
        """
        Return a BitFieldOperation instance to conveniently construct one or
        more bitfield operations on ``key``.
        """
        return BitFieldOperation(self, key, default_overflow=default_overflow)

    def bitop(self, operation, dest, *keys):
        """
        Perform a bitwise operation using ``operation`` between ``keys`` and
        store the result in ``dest``.
        """
        return self.execute_command('BITOP', operation, dest, *keys)
  952. def bitpos(self, key, bit, start=None, end=None):
  953. """
  954. Return the position of the first bit set to 1 or 0 in a string.
  955. ``start`` and ``end`` difines search range. The range is interpreted
  956. as a range of bytes and not a range of bits, so start=0 and end=2
  957. means to look at the first three bytes.
  958. """
  959. if bit not in (0, 1):
  960. raise DataError('bit must be 0 or 1')
  961. params = [key, bit]
  962. start is not None and params.append(start)
  963. if start is not None and end is not None:
  964. params.append(end)
  965. elif start is None and end is not None:
  966. raise DataError("start argument is not set, "
  967. "when end is specified")
  968. return self.execute_command('BITPOS', *params)
    def decr(self, name, amount=1):
        """
        Decrements the value of ``key`` by ``amount``. If no key exists,
        the value will be initialized as 0 - ``amount``
        """
        # An alias for ``decrby()``, because it is already implemented
        # as DECRBY redis command.
        return self.decrby(name, amount)

    def decrby(self, name, amount=1):
        """
        Decrements the value of ``key`` by ``amount``. If no key exists,
        the value will be initialized as 0 - ``amount``
        """
        return self.execute_command('DECRBY', name, amount)

    def delete(self, *names):
        "Delete one or more keys specified by ``names``"
        return self.execute_command('DEL', *names)
    def __delitem__(self, name):
        # ``del client[name]`` deletes the key.
        self.delete(name)

    def dump(self, name):
        """
        Return a serialized version of the value stored at the specified key.
        If key does not exist a nil bulk reply is returned.
        """
        return self.execute_command('DUMP', name)

    def exists(self, *names):
        "Returns the number of ``names`` that exist"
        return self.execute_command('EXISTS', *names)
    # ``name in client`` support; truthy when the key exists.
    __contains__ = exists
  998. def expire(self, name, time):
  999. """
  1000. Set an expire flag on key ``name`` for ``time`` seconds. ``time``
  1001. can be represented by an integer or a Python timedelta object.
  1002. """
  1003. if isinstance(time, datetime.timedelta):
  1004. time = int(time.total_seconds())
  1005. return self.execute_command('EXPIRE', name, time)
  1006. def expireat(self, name, when):
  1007. """
  1008. Set an expire flag on key ``name``. ``when`` can be represented
  1009. as an integer indicating unix time or a Python datetime object.
  1010. """
  1011. if isinstance(when, datetime.datetime):
  1012. when = int(mod_time.mktime(when.timetuple()))
  1013. return self.execute_command('EXPIREAT', name, when)
    def get(self, name):
        """
        Return the value at key ``name``, or None if the key doesn't exist
        """
        return self.execute_command('GET', name)

    def __getitem__(self, name):
        """
        Return the value at key ``name``, raises a KeyError if the key
        doesn't exist.
        """
        value = self.get(name)
        if value is not None:
            return value
        raise KeyError(name)
    def getbit(self, name, offset):
        "Returns a boolean indicating the value of ``offset`` in ``name``"
        return self.execute_command('GETBIT', name, offset)

    def getrange(self, key, start, end):
        """
        Returns the substring of the string value stored at ``key``,
        determined by the offsets ``start`` and ``end`` (both are inclusive)
        """
        return self.execute_command('GETRANGE', key, start, end)

    def getset(self, name, value):
        """
        Sets the value at key ``name`` to ``value``
        and returns the old value at key ``name`` atomically.
        """
        return self.execute_command('GETSET', name, value)
    def incr(self, name, amount=1):
        """
        Increments the value of ``key`` by ``amount``. If no key exists,
        the value will be initialized as ``amount``
        """
        # Alias for ``incrby()``; INCR is just INCRBY with amount=1.
        return self.incrby(name, amount)

    def incrby(self, name, amount=1):
        """
        Increments the value of ``key`` by ``amount``. If no key exists,
        the value will be initialized as ``amount``
        """
        return self.execute_command('INCRBY', name, amount)

    def incrbyfloat(self, name, amount=1.0):
        """
        Increments the value at key ``name`` by floating ``amount``.
        If no key exists, the value will be initialized as ``amount``
        """
        return self.execute_command('INCRBYFLOAT', name, amount)
    def keys(self, pattern='*'):
        "Returns a list of keys matching ``pattern``"
        return self.execute_command('KEYS', pattern)

    def mget(self, keys, *args):
        """
        Returns a list of values ordered identically to ``keys``
        """
        args = list_or_args(keys, args)
        options = {}
        if not args:
            # MGET with no keys is a server error; map it to an empty list
            # via the EMPTY_RESPONSE option handled in parse_response().
            options[EMPTY_RESPONSE] = []
        return self.execute_command('MGET', *args, **options)
    def mset(self, mapping):
        """
        Sets key/values based on a mapping. Mapping is a dictionary of
        key/value pairs. Both keys and values should be strings or types that
        can be cast to a string via str().
        """
        # Flatten the mapping to [k1, v1, k2, v2, ...] as MSET expects.
        items = []
        for pair in iteritems(mapping):
            items.extend(pair)
        return self.execute_command('MSET', *items)

    def msetnx(self, mapping):
        """
        Sets key/values based on a mapping if none of the keys are already set.
        Mapping is a dictionary of key/value pairs. Both keys and values
        should be strings or types that can be cast to a string via str().
        Returns a boolean indicating if the operation was successful.
        """
        items = []
        for pair in iteritems(mapping):
            items.extend(pair)
        return self.execute_command('MSETNX', *items)
    def move(self, name, db):
        "Moves the key ``name`` to a different Redis database ``db``"
        return self.execute_command('MOVE', name, db)

    def persist(self, name):
        "Removes an expiration on ``name``"
        return self.execute_command('PERSIST', name)
  1102. def pexpire(self, name, time):
  1103. """
  1104. Set an expire flag on key ``name`` for ``time`` milliseconds.
  1105. ``time`` can be represented by an integer or a Python timedelta
  1106. object.
  1107. """
  1108. if isinstance(time, datetime.timedelta):
  1109. time = int(time.total_seconds() * 1000)
  1110. return self.execute_command('PEXPIRE', name, time)
    def pexpireat(self, name, when):
        """
        Set an expire flag on key ``name``. ``when`` can be represented
        as an integer representing unix time in milliseconds (unix time * 1000)
        or a Python datetime object.
        """
        if isinstance(when, datetime.datetime):
            # Seconds come from the timetuple; sub-second precision is
            # recovered from the microsecond field.
            ms = int(when.microsecond / 1000)
            when = int(mod_time.mktime(when.timetuple())) * 1000 + ms
        return self.execute_command('PEXPIREAT', name, when)
    def psetex(self, name, time_ms, value):
        """
        Set the value of key ``name`` to ``value`` that expires in ``time_ms``
        milliseconds. ``time_ms`` can be represented by an integer or a Python
        timedelta object
        """
        if isinstance(time_ms, datetime.timedelta):
            time_ms = int(time_ms.total_seconds() * 1000)
        return self.execute_command('PSETEX', name, time_ms, value)

    def pttl(self, name):
        "Returns the number of milliseconds until the key ``name`` will expire"
        return self.execute_command('PTTL', name)
    def randomkey(self):
        "Returns the name of a random key"
        return self.execute_command('RANDOMKEY')

    def rename(self, src, dst):
        """
        Rename key ``src`` to ``dst``
        """
        return self.execute_command('RENAME', src, dst)

    def renamenx(self, src, dst):
        "Rename key ``src`` to ``dst`` if ``dst`` doesn't already exist"
        return self.execute_command('RENAMENX', src, dst)
  1144. def restore(self, name, ttl, value, replace=False):
  1145. """
  1146. Create a key using the provided serialized value, previously obtained
  1147. using DUMP.
  1148. """
  1149. params = [name, ttl, value]
  1150. if replace:
  1151. params.append('REPLACE')
  1152. return self.execute_command('RESTORE', *params)
  1153. def set(self, name, value, ex=None, px=None, nx=False, xx=False):
  1154. """
  1155. Set the value at key ``name`` to ``value``
  1156. ``ex`` sets an expire flag on key ``name`` for ``ex`` seconds.
  1157. ``px`` sets an expire flag on key ``name`` for ``px`` milliseconds.
  1158. ``nx`` if set to True, set the value at key ``name`` to ``value`` only
  1159. if it does not exist.
  1160. ``xx`` if set to True, set the value at key ``name`` to ``value`` only
  1161. if it already exists.
  1162. """
  1163. pieces = [name, value]
  1164. if ex is not None:
  1165. pieces.append('EX')
  1166. if isinstance(ex, datetime.timedelta):
  1167. ex = int(ex.total_seconds())
  1168. pieces.append(ex)
  1169. if px is not None:
  1170. pieces.append('PX')
  1171. if isinstance(px, datetime.timedelta):
  1172. px = int(px.total_seconds() * 1000)
  1173. pieces.append(px)
  1174. if nx:
  1175. pieces.append('NX')
  1176. if xx:
  1177. pieces.append('XX')
  1178. return self.execute_command('SET', *pieces)
    def __setitem__(self, name, value):
        # ``client[name] = value`` is a plain SET.
        self.set(name, value)

    def setbit(self, name, offset, value):
        """
        Flag the ``offset`` in ``name`` as ``value``. Returns a boolean
        indicating the previous value of ``offset``.
        """
        # Normalize any truthy value to 1, falsy to 0.
        value = value and 1 or 0
        return self.execute_command('SETBIT', name, offset, value)
  1188. def setex(self, name, time, value):
  1189. """
  1190. Set the value of key ``name`` to ``value`` that expires in ``time``
  1191. seconds. ``time`` can be represented by an integer or a Python
  1192. timedelta object.
  1193. """
  1194. if isinstance(time, datetime.timedelta):
  1195. time = int(time.total_seconds())
  1196. return self.execute_command('SETEX', name, time, value)
    def setnx(self, name, value):
        "Set the value of key ``name`` to ``value`` if key doesn't exist"
        return self.execute_command('SETNX', name, value)

    def setrange(self, name, offset, value):
        """
        Overwrite bytes in the value of ``name`` starting at ``offset`` with
        ``value``. If ``offset`` plus the length of ``value`` exceeds the
        length of the original value, the new value will be larger than before.
        If ``offset`` exceeds the length of the original value, null bytes
        will be used to pad between the end of the previous value and the start
        of what's being injected.

        Returns the length of the new string.
        """
        return self.execute_command('SETRANGE', name, offset, value)

    def strlen(self, name):
        "Return the number of bytes stored in the value of ``name``"
        return self.execute_command('STRLEN', name)
    def substr(self, name, start, end=-1):
        """
        Return a substring of the string at key ``name``. ``start`` and ``end``
        are 0-based integers specifying the portion of the string to return.
        """
        return self.execute_command('SUBSTR', name, start, end)

    def touch(self, *args):
        """
        Alters the last access time of a key(s) ``*args``. A key is ignored
        if it does not exist.
        """
        return self.execute_command('TOUCH', *args)

    def ttl(self, name):
        "Returns the number of seconds until the key ``name`` will expire"
        return self.execute_command('TTL', name)

    def type(self, name):
        "Returns the type of key ``name``"
        return self.execute_command('TYPE', name)
  1232. def watch(self, *names):
  1233. """
  1234. Watches the values at keys ``names``, or None if the key doesn't exist
  1235. """
  1236. warnings.warn(DeprecationWarning('Call WATCH from a Pipeline object'))
  1237. def unwatch(self):
  1238. """
  1239. Unwatches the value at key ``name``, or None of the key doesn't exist
  1240. """
  1241. warnings.warn(
  1242. DeprecationWarning('Call UNWATCH from a Pipeline object'))
  1243. def unlink(self, *names):
  1244. "Unlink one or more keys specified by ``names``"
  1245. return self.execute_command('UNLINK', *names)
  1246. # LIST COMMANDS
  1247. def blpop(self, keys, timeout=0):
  1248. """
  1249. LPOP a value off of the first non-empty list
  1250. named in the ``keys`` list.
  1251. If none of the lists in ``keys`` has a value to LPOP, then block
  1252. for ``timeout`` seconds, or until a value gets pushed on to one
  1253. of the lists.
  1254. If timeout is 0, then block indefinitely.
  1255. """
  1256. if timeout is None:
  1257. timeout = 0
  1258. keys = list_or_args(keys, None)
  1259. keys.append(timeout)
  1260. return self.execute_command('BLPOP', *keys)
  1261. def brpop(self, keys, timeout=0):
  1262. """
  1263. RPOP a value off of the first non-empty list
  1264. named in the ``keys`` list.
  1265. If none of the lists in ``keys`` has a value to RPOP, then block
  1266. for ``timeout`` seconds, or until a value gets pushed on to one
  1267. of the lists.
  1268. If timeout is 0, then block indefinitely.
  1269. """
  1270. if timeout is None:
  1271. timeout = 0
  1272. keys = list_or_args(keys, None)
  1273. keys.append(timeout)
  1274. return self.execute_command('BRPOP', *keys)
  1275. def brpoplpush(self, src, dst, timeout=0):
  1276. """
  1277. Pop a value off the tail of ``src``, push it on the head of ``dst``
  1278. and then return it.
  1279. This command blocks until a value is in ``src`` or until ``timeout``
  1280. seconds elapse, whichever is first. A ``timeout`` value of 0 blocks
  1281. forever.
  1282. """
  1283. if timeout is None:
  1284. timeout = 0
  1285. return self.execute_command('BRPOPLPUSH', src, dst, timeout)
  1286. def lindex(self, name, index):
  1287. """
  1288. Return the item from list ``name`` at position ``index``
  1289. Negative indexes are supported and will return an item at the
  1290. end of the list
  1291. """
  1292. return self.execute_command('LINDEX', name, index)
  1293. def linsert(self, name, where, refvalue, value):
  1294. """
  1295. Insert ``value`` in list ``name`` either immediately before or after
  1296. [``where``] ``refvalue``
  1297. Returns the new length of the list on success or -1 if ``refvalue``
  1298. is not in the list.
  1299. """
  1300. return self.execute_command('LINSERT', name, where, refvalue, value)
  1301. def llen(self, name):
  1302. "Return the length of the list ``name``"
  1303. return self.execute_command('LLEN', name)
  1304. def lpop(self, name):
  1305. "Remove and return the first item of the list ``name``"
  1306. return self.execute_command('LPOP', name)
  1307. def lpush(self, name, *values):
  1308. "Push ``values`` onto the head of the list ``name``"
  1309. return self.execute_command('LPUSH', name, *values)
  1310. def lpushx(self, name, value):
  1311. "Push ``value`` onto the head of the list ``name`` if ``name`` exists"
  1312. return self.execute_command('LPUSHX', name, value)
  1313. def lrange(self, name, start, end):
  1314. """
  1315. Return a slice of the list ``name`` between
  1316. position ``start`` and ``end``
  1317. ``start`` and ``end`` can be negative numbers just like
  1318. Python slicing notation
  1319. """
  1320. return self.execute_command('LRANGE', name, start, end)
  1321. def lrem(self, name, count, value):
  1322. """
  1323. Remove the first ``count`` occurrences of elements equal to ``value``
  1324. from the list stored at ``name``.
  1325. The count argument influences the operation in the following ways:
  1326. count > 0: Remove elements equal to value moving from head to tail.
  1327. count < 0: Remove elements equal to value moving from tail to head.
  1328. count = 0: Remove all elements equal to value.
  1329. """
  1330. return self.execute_command('LREM', name, count, value)
  1331. def lset(self, name, index, value):
  1332. "Set ``position`` of list ``name`` to ``value``"
  1333. return self.execute_command('LSET', name, index, value)
  1334. def ltrim(self, name, start, end):
  1335. """
  1336. Trim the list ``name``, removing all values not within the slice
  1337. between ``start`` and ``end``
  1338. ``start`` and ``end`` can be negative numbers just like
  1339. Python slicing notation
  1340. """
  1341. return self.execute_command('LTRIM', name, start, end)
  1342. def rpop(self, name):
  1343. "Remove and return the last item of the list ``name``"
  1344. return self.execute_command('RPOP', name)
  1345. def rpoplpush(self, src, dst):
  1346. """
  1347. RPOP a value off of the ``src`` list and atomically LPUSH it
  1348. on to the ``dst`` list. Returns the value.
  1349. """
  1350. return self.execute_command('RPOPLPUSH', src, dst)
  1351. def rpush(self, name, *values):
  1352. "Push ``values`` onto the tail of the list ``name``"
  1353. return self.execute_command('RPUSH', name, *values)
  1354. def rpushx(self, name, value):
  1355. "Push ``value`` onto the tail of the list ``name`` if ``name`` exists"
  1356. return self.execute_command('RPUSHX', name, value)
  1357. def sort(self, name, start=None, num=None, by=None, get=None,
  1358. desc=False, alpha=False, store=None, groups=False):
  1359. """
  1360. Sort and return the list, set or sorted set at ``name``.
  1361. ``start`` and ``num`` allow for paging through the sorted data
  1362. ``by`` allows using an external key to weight and sort the items.
  1363. Use an "*" to indicate where in the key the item value is located
  1364. ``get`` allows for returning items from external keys rather than the
  1365. sorted data itself. Use an "*" to indicate where int he key
  1366. the item value is located
  1367. ``desc`` allows for reversing the sort
  1368. ``alpha`` allows for sorting lexicographically rather than numerically
  1369. ``store`` allows for storing the result of the sort into
  1370. the key ``store``
  1371. ``groups`` if set to True and if ``get`` contains at least two
  1372. elements, sort will return a list of tuples, each containing the
  1373. values fetched from the arguments to ``get``.
  1374. """
  1375. if (start is not None and num is None) or \
  1376. (num is not None and start is None):
  1377. raise DataError("``start`` and ``num`` must both be specified")
  1378. pieces = [name]
  1379. if by is not None:
  1380. pieces.append(Token.get_token('BY'))
  1381. pieces.append(by)
  1382. if start is not None and num is not None:
  1383. pieces.append(Token.get_token('LIMIT'))
  1384. pieces.append(start)
  1385. pieces.append(num)
  1386. if get is not None:
  1387. # If get is a string assume we want to get a single value.
  1388. # Otherwise assume it's an interable and we want to get multiple
  1389. # values. We can't just iterate blindly because strings are
  1390. # iterable.
  1391. if isinstance(get, (bytes, basestring)):
  1392. pieces.append(Token.get_token('GET'))
  1393. pieces.append(get)
  1394. else:
  1395. for g in get:
  1396. pieces.append(Token.get_token('GET'))
  1397. pieces.append(g)
  1398. if desc:
  1399. pieces.append(Token.get_token('DESC'))
  1400. if alpha:
  1401. pieces.append(Token.get_token('ALPHA'))
  1402. if store is not None:
  1403. pieces.append(Token.get_token('STORE'))
  1404. pieces.append(store)
  1405. if groups:
  1406. if not get or isinstance(get, (bytes, basestring)) or len(get) < 2:
  1407. raise DataError('when using "groups" the "get" argument '
  1408. 'must be specified and contain at least '
  1409. 'two keys')
  1410. options = {'groups': len(get) if groups else None}
  1411. return self.execute_command('SORT', *pieces, **options)
  1412. # SCAN COMMANDS
  1413. def scan(self, cursor=0, match=None, count=None):
  1414. """
  1415. Incrementally return lists of key names. Also return a cursor
  1416. indicating the scan position.
  1417. ``match`` allows for filtering the keys by pattern
  1418. ``count`` allows for hint the minimum number of returns
  1419. """
  1420. pieces = [cursor]
  1421. if match is not None:
  1422. pieces.extend([Token.get_token('MATCH'), match])
  1423. if count is not None:
  1424. pieces.extend([Token.get_token('COUNT'), count])
  1425. return self.execute_command('SCAN', *pieces)
  1426. def scan_iter(self, match=None, count=None):
  1427. """
  1428. Make an iterator using the SCAN command so that the client doesn't
  1429. need to remember the cursor position.
  1430. ``match`` allows for filtering the keys by pattern
  1431. ``count`` allows for hint the minimum number of returns
  1432. """
  1433. cursor = '0'
  1434. while cursor != 0:
  1435. cursor, data = self.scan(cursor=cursor, match=match, count=count)
  1436. for item in data:
  1437. yield item
  1438. def sscan(self, name, cursor=0, match=None, count=None):
  1439. """
  1440. Incrementally return lists of elements in a set. Also return a cursor
  1441. indicating the scan position.
  1442. ``match`` allows for filtering the keys by pattern
  1443. ``count`` allows for hint the minimum number of returns
  1444. """
  1445. pieces = [name, cursor]
  1446. if match is not None:
  1447. pieces.extend([Token.get_token('MATCH'), match])
  1448. if count is not None:
  1449. pieces.extend([Token.get_token('COUNT'), count])
  1450. return self.execute_command('SSCAN', *pieces)
  1451. def sscan_iter(self, name, match=None, count=None):
  1452. """
  1453. Make an iterator using the SSCAN command so that the client doesn't
  1454. need to remember the cursor position.
  1455. ``match`` allows for filtering the keys by pattern
  1456. ``count`` allows for hint the minimum number of returns
  1457. """
  1458. cursor = '0'
  1459. while cursor != 0:
  1460. cursor, data = self.sscan(name, cursor=cursor,
  1461. match=match, count=count)
  1462. for item in data:
  1463. yield item
  1464. def hscan(self, name, cursor=0, match=None, count=None):
  1465. """
  1466. Incrementally return key/value slices in a hash. Also return a cursor
  1467. indicating the scan position.
  1468. ``match`` allows for filtering the keys by pattern
  1469. ``count`` allows for hint the minimum number of returns
  1470. """
  1471. pieces = [name, cursor]
  1472. if match is not None:
  1473. pieces.extend([Token.get_token('MATCH'), match])
  1474. if count is not None:
  1475. pieces.extend([Token.get_token('COUNT'), count])
  1476. return self.execute_command('HSCAN', *pieces)
  1477. def hscan_iter(self, name, match=None, count=None):
  1478. """
  1479. Make an iterator using the HSCAN command so that the client doesn't
  1480. need to remember the cursor position.
  1481. ``match`` allows for filtering the keys by pattern
  1482. ``count`` allows for hint the minimum number of returns
  1483. """
  1484. cursor = '0'
  1485. while cursor != 0:
  1486. cursor, data = self.hscan(name, cursor=cursor,
  1487. match=match, count=count)
  1488. for item in data.items():
  1489. yield item
  1490. def zscan(self, name, cursor=0, match=None, count=None,
  1491. score_cast_func=float):
  1492. """
  1493. Incrementally return lists of elements in a sorted set. Also return a
  1494. cursor indicating the scan position.
  1495. ``match`` allows for filtering the keys by pattern
  1496. ``count`` allows for hint the minimum number of returns
  1497. ``score_cast_func`` a callable used to cast the score return value
  1498. """
  1499. pieces = [name, cursor]
  1500. if match is not None:
  1501. pieces.extend([Token.get_token('MATCH'), match])
  1502. if count is not None:
  1503. pieces.extend([Token.get_token('COUNT'), count])
  1504. options = {'score_cast_func': score_cast_func}
  1505. return self.execute_command('ZSCAN', *pieces, **options)
  1506. def zscan_iter(self, name, match=None, count=None,
  1507. score_cast_func=float):
  1508. """
  1509. Make an iterator using the ZSCAN command so that the client doesn't
  1510. need to remember the cursor position.
  1511. ``match`` allows for filtering the keys by pattern
  1512. ``count`` allows for hint the minimum number of returns
  1513. ``score_cast_func`` a callable used to cast the score return value
  1514. """
  1515. cursor = '0'
  1516. while cursor != 0:
  1517. cursor, data = self.zscan(name, cursor=cursor, match=match,
  1518. count=count,
  1519. score_cast_func=score_cast_func)
  1520. for item in data:
  1521. yield item
  1522. # SET COMMANDS
  1523. def sadd(self, name, *values):
  1524. "Add ``value(s)`` to set ``name``"
  1525. return self.execute_command('SADD', name, *values)
  1526. def scard(self, name):
  1527. "Return the number of elements in set ``name``"
  1528. return self.execute_command('SCARD', name)
  1529. def sdiff(self, keys, *args):
  1530. "Return the difference of sets specified by ``keys``"
  1531. args = list_or_args(keys, args)
  1532. return self.execute_command('SDIFF', *args)
  1533. def sdiffstore(self, dest, keys, *args):
  1534. """
  1535. Store the difference of sets specified by ``keys`` into a new
  1536. set named ``dest``. Returns the number of keys in the new set.
  1537. """
  1538. args = list_or_args(keys, args)
  1539. return self.execute_command('SDIFFSTORE', dest, *args)
  1540. def sinter(self, keys, *args):
  1541. "Return the intersection of sets specified by ``keys``"
  1542. args = list_or_args(keys, args)
  1543. return self.execute_command('SINTER', *args)
  1544. def sinterstore(self, dest, keys, *args):
  1545. """
  1546. Store the intersection of sets specified by ``keys`` into a new
  1547. set named ``dest``. Returns the number of keys in the new set.
  1548. """
  1549. args = list_or_args(keys, args)
  1550. return self.execute_command('SINTERSTORE', dest, *args)
  1551. def sismember(self, name, value):
  1552. "Return a boolean indicating if ``value`` is a member of set ``name``"
  1553. return self.execute_command('SISMEMBER', name, value)
  1554. def smembers(self, name):
  1555. "Return all members of the set ``name``"
  1556. return self.execute_command('SMEMBERS', name)
  1557. def smove(self, src, dst, value):
  1558. "Move ``value`` from set ``src`` to set ``dst`` atomically"
  1559. return self.execute_command('SMOVE', src, dst, value)
  1560. def spop(self, name, count=None):
  1561. "Remove and return a random member of set ``name``"
  1562. args = (count is not None) and [count] or []
  1563. return self.execute_command('SPOP', name, *args)
  1564. def srandmember(self, name, number=None):
  1565. """
  1566. If ``number`` is None, returns a random member of set ``name``.
  1567. If ``number`` is supplied, returns a list of ``number`` random
  1568. memebers of set ``name``. Note this is only available when running
  1569. Redis 2.6+.
  1570. """
  1571. args = (number is not None) and [number] or []
  1572. return self.execute_command('SRANDMEMBER', name, *args)
  1573. def srem(self, name, *values):
  1574. "Remove ``values`` from set ``name``"
  1575. return self.execute_command('SREM', name, *values)
  1576. def sunion(self, keys, *args):
  1577. "Return the union of sets specified by ``keys``"
  1578. args = list_or_args(keys, args)
  1579. return self.execute_command('SUNION', *args)
  1580. def sunionstore(self, dest, keys, *args):
  1581. """
  1582. Store the union of sets specified by ``keys`` into a new
  1583. set named ``dest``. Returns the number of keys in the new set.
  1584. """
  1585. args = list_or_args(keys, args)
  1586. return self.execute_command('SUNIONSTORE', dest, *args)
  1587. # STREAMS COMMANDS
  1588. def xack(self, name, groupname, *ids):
  1589. """
  1590. Acknowledges the successful processing of one or more messages.
  1591. name: name of the stream.
  1592. groupname: name of the consumer group.
  1593. *ids: message ids to acknowlege.
  1594. """
  1595. return self.execute_command('XACK', name, groupname, *ids)
  1596. def xadd(self, name, fields, id='*', maxlen=None, approximate=True):
  1597. """
  1598. Add to a stream.
  1599. name: name of the stream
  1600. fields: dict of field/value pairs to insert into the stream
  1601. id: Location to insert this record. By default it is appended.
  1602. maxlen: truncate old stream members beyond this size
  1603. approximate: actual stream length may be slightly more than maxlen
  1604. """
  1605. pieces = []
  1606. if maxlen is not None:
  1607. if not isinstance(maxlen, (int, long)) or maxlen < 1:
  1608. raise DataError('XADD maxlen must be a positive integer')
  1609. pieces.append(Token.get_token('MAXLEN'))
  1610. if approximate:
  1611. pieces.append(Token.get_token('~'))
  1612. pieces.append(str(maxlen))
  1613. pieces.append(id)
  1614. if not isinstance(fields, dict) or len(fields) == 0:
  1615. raise DataError('XADD fields must be a non-empty dict')
  1616. for pair in iteritems(fields):
  1617. pieces.extend(pair)
  1618. return self.execute_command('XADD', name, *pieces)
def xclaim(self, name, groupname, consumername, min_idle_time, message_ids,
           idle=None, time=None, retrycount=None, force=False,
           justid=False):
    """
    Changes the ownership of a pending message.

    name: name of the stream.
    groupname: name of the consumer group.
    consumername: name of a consumer that claims the message.
    min_idle_time: filter messages that were idle less than this amount of
        milliseconds.
    message_ids: non-empty list or tuple of message IDs to claim.
    idle: optional integer. Set the idle time (last time it was delivered)
        of the message, in milliseconds.
    time: optional integer. This is the same as idle but instead of a
        relative amount of milliseconds, it sets the idle time to a
        specific Unix time (in milliseconds).
    retrycount: optional integer. Set the retry counter to the specified
        value. This counter is incremented every time a message is
        delivered again.
    force: optional boolean, False by default. Creates the pending message
        entry in the PEL even if certain specified IDs are not already in
        the PEL assigned to a different client.
    justid: optional boolean, False by default. Return just an array of
        IDs of messages successfully claimed, without the message bodies.
    """
    # Validate the required arguments before building the command so that
    # nothing is sent to the server on bad input.
    if not isinstance(min_idle_time, (int, long)) or min_idle_time < 0:
        raise DataError("XCLAIM min_idle_time must be a non negative "
                        "integer")
    if not isinstance(message_ids, (list, tuple)) or not message_ids:
        raise DataError("XCLAIM message_ids must be a non empty list or "
                        "tuple of message IDs to claim")
    kwargs = {}
    pieces = [name, groupname, consumername, str(min_idle_time)]
    pieces.extend(list(message_ids))
    # Each optional modifier is validated, then appended as TOKEN + value.
    if idle is not None:
        if not isinstance(idle, (int, long)):
            raise DataError("XCLAIM idle must be an integer")
        pieces.extend((Token.get_token('IDLE'), str(idle)))
    if time is not None:
        if not isinstance(time, (int, long)):
            raise DataError("XCLAIM time must be an integer")
        pieces.extend((Token.get_token('TIME'), str(time)))
    if retrycount is not None:
        if not isinstance(retrycount, (int, long)):
            raise DataError("XCLAIM retrycount must be an integer")
        pieces.extend((Token.get_token('RETRYCOUNT'), str(retrycount)))
    if force:
        if not isinstance(force, bool):
            raise DataError("XCLAIM force must be a boolean")
        pieces.append(Token.get_token('FORCE'))
    if justid:
        if not isinstance(justid, bool):
            raise DataError("XCLAIM justid must be a boolean")
        pieces.append(Token.get_token('JUSTID'))
        # Tells the response parser the reply is a flat list of IDs.
        kwargs['parse_justid'] = True
    return self.execute_command('XCLAIM', *pieces, **kwargs)
  1675. def xdel(self, name, *ids):
  1676. """
  1677. Deletes one or more messages from a stream.
  1678. name: name of the stream.
  1679. *ids: message ids to delete.
  1680. """
  1681. return self.execute_command('XDEL', name, *ids)
  1682. def xgroup_create(self, name, groupname, id='$', mkstream=False):
  1683. """
  1684. Create a new consumer group associated with a stream.
  1685. name: name of the stream.
  1686. groupname: name of the consumer group.
  1687. id: ID of the last item in the stream to consider already delivered.
  1688. """
  1689. pieces = ['XGROUP CREATE', name, groupname, id]
  1690. if mkstream:
  1691. pieces.append(Token.get_token('MKSTREAM'))
  1692. return self.execute_command(*pieces)
  1693. def xgroup_delconsumer(self, name, groupname, consumername):
  1694. """
  1695. Remove a specific consumer from a consumer group.
  1696. Returns the number of pending messages that the consumer had before it
  1697. was deleted.
  1698. name: name of the stream.
  1699. groupname: name of the consumer group.
  1700. consumername: name of consumer to delete
  1701. """
  1702. return self.execute_command('XGROUP DELCONSUMER', name, groupname,
  1703. consumername)
  1704. def xgroup_destroy(self, name, groupname):
  1705. """
  1706. Destroy a consumer group.
  1707. name: name of the stream.
  1708. groupname: name of the consumer group.
  1709. """
  1710. return self.execute_command('XGROUP DESTROY', name, groupname)
  1711. def xgroup_setid(self, name, groupname, id):
  1712. """
  1713. Set the consumer group last delivered ID to something else.
  1714. name: name of the stream.
  1715. groupname: name of the consumer group.
  1716. id: ID of the last item in the stream to consider already delivered.
  1717. """
  1718. return self.execute_command('XGROUP SETID', name, groupname, id)
  1719. def xinfo_consumers(self, name, groupname):
  1720. """
  1721. Returns general information about the consumers in the group.
  1722. name: name of the stream.
  1723. groupname: name of the consumer group.
  1724. """
  1725. return self.execute_command('XINFO CONSUMERS', name, groupname)
  1726. def xinfo_groups(self, name):
  1727. """
  1728. Returns general information about the consumer groups of the stream.
  1729. name: name of the stream.
  1730. """
  1731. return self.execute_command('XINFO GROUPS', name)
  1732. def xinfo_stream(self, name):
  1733. """
  1734. Returns general information about the stream.
  1735. name: name of the stream.
  1736. """
  1737. return self.execute_command('XINFO STREAM', name)
  1738. def xlen(self, name):
  1739. """
  1740. Returns the number of elements in a given stream.
  1741. """
  1742. return self.execute_command('XLEN', name)
  1743. def xpending(self, name, groupname):
  1744. """
  1745. Returns information about pending messages of a group.
  1746. name: name of the stream.
  1747. groupname: name of the consumer group.
  1748. """
  1749. return self.execute_command('XPENDING', name, groupname)
def xpending_range(self, name, groupname, min='-', max='+', count=-1,
                   consumername=None):
    """
    Returns information about pending messages, in a range.

    name: name of the stream.
    groupname: name of the consumer group.
    min: first stream ID. defaults to '-',
        meaning the earliest available.
    max: last stream ID. defaults to '+',
        meaning the latest available.
    count: if set, only return this many items, beginning with the
        earliest available. Must be an integer >= -1.
    consumername: name of a consumer to filter by (optional).
    """
    pieces = [name, groupname]
    # min/max/count must be given together (they all default to
    # non-None values, so normally they always are).
    if min is not None or max is not None or count is not None:
        if min is None or max is None or count is None:
            raise DataError("XPENDING must be provided with min, max "
                            "and count parameters, or none of them. ")
        if not isinstance(count, (int, long)) or count < -1:
            raise DataError("XPENDING count must be a integer >= -1")
        pieces.extend((min, max, str(count)))
    # The consumer filter is only valid in the extended (ranged) form.
    if consumername is not None:
        if min is None or max is None or count is None:
            raise DataError("if XPENDING is provided with consumername,"
                            " it must be provided with min, max and"
                            " count parameters")
        pieces.append(consumername)
    # parse_detail selects the per-message (detailed) reply parser.
    return self.execute_command('XPENDING', *pieces, parse_detail=True)
  1779. def xrange(self, name, min='-', max='+', count=None):
  1780. """
  1781. Read stream values within an interval.
  1782. name: name of the stream.
  1783. start: first stream ID. defaults to '-',
  1784. meaning the earliest available.
  1785. finish: last stream ID. defaults to '+',
  1786. meaning the latest available.
  1787. count: if set, only return this many items, beginning with the
  1788. earliest available.
  1789. """
  1790. pieces = [min, max]
  1791. if count is not None:
  1792. if not isinstance(count, (int, long)) or count < 1:
  1793. raise DataError('XRANGE count must be a positive integer')
  1794. pieces.append(Token.get_token('COUNT'))
  1795. pieces.append(str(count))
  1796. return self.execute_command('XRANGE', name, *pieces)
  1797. def xread(self, streams, count=None, block=None):
  1798. """
  1799. Block and monitor multiple streams for new data.
  1800. streams: a dict of stream names to stream IDs, where
  1801. IDs indicate the last ID already seen.
  1802. count: if set, only return this many items, beginning with the
  1803. earliest available.
  1804. block: number of milliseconds to wait, if nothing already present.
  1805. """
  1806. pieces = []
  1807. if block is not None:
  1808. if not isinstance(block, (int, long)) or block < 0:
  1809. raise DataError('XREAD block must be a non-negative integer')
  1810. pieces.append(Token.get_token('BLOCK'))
  1811. pieces.append(str(block))
  1812. if count is not None:
  1813. if not isinstance(count, (int, long)) or count < 1:
  1814. raise DataError('XREAD count must be a positive integer')
  1815. pieces.append(Token.get_token('COUNT'))
  1816. pieces.append(str(count))
  1817. if not isinstance(streams, dict) or len(streams) == 0:
  1818. raise DataError('XREAD streams must be a non empty dict')
  1819. pieces.append(Token.get_token('STREAMS'))
  1820. keys, values = izip(*iteritems(streams))
  1821. pieces.extend(keys)
  1822. pieces.extend(values)
  1823. return self.execute_command('XREAD', *pieces)
  1824. def xreadgroup(self, groupname, consumername, streams, count=None,
  1825. block=None):
  1826. """
  1827. Read from a stream via a consumer group.
  1828. groupname: name of the consumer group.
  1829. consumername: name of the requesting consumer.
  1830. streams: a dict of stream names to stream IDs, where
  1831. IDs indicate the last ID already seen.
  1832. count: if set, only return this many items, beginning with the
  1833. earliest available.
  1834. block: number of milliseconds to wait, if nothing already present.
  1835. """
  1836. pieces = [Token.get_token('GROUP'), groupname, consumername]
  1837. if count is not None:
  1838. if not isinstance(count, (int, long)) or count < 1:
  1839. raise DataError("XREADGROUP count must be a positive integer")
  1840. pieces.append(Token.get_token("COUNT"))
  1841. pieces.append(str(count))
  1842. if block is not None:
  1843. if not isinstance(block, (int, long)) or block < 0:
  1844. raise DataError("XREADGROUP block must be a non-negative "
  1845. "integer")
  1846. pieces.append(Token.get_token("BLOCK"))
  1847. pieces.append(str(block))
  1848. if not isinstance(streams, dict) or len(streams) == 0:
  1849. raise DataError('XREADGROUP streams must be a non empty dict')
  1850. pieces.append(Token.get_token('STREAMS'))
  1851. pieces.extend(streams.keys())
  1852. pieces.extend(streams.values())
  1853. return self.execute_command('XREADGROUP', *pieces)
  1854. def xrevrange(self, name, max='+', min='-', count=None):
  1855. """
  1856. Read stream values within an interval, in reverse order.
  1857. name: name of the stream
  1858. start: first stream ID. defaults to '+',
  1859. meaning the latest available.
  1860. finish: last stream ID. defaults to '-',
  1861. meaning the earliest available.
  1862. count: if set, only return this many items, beginning with the
  1863. latest available.
  1864. """
  1865. pieces = [max, min]
  1866. if count is not None:
  1867. if not isinstance(count, (int, long)) or count < 1:
  1868. raise DataError('XREVRANGE count must be a positive integer')
  1869. pieces.append(Token.get_token('COUNT'))
  1870. pieces.append(str(count))
  1871. return self.execute_command('XREVRANGE', name, *pieces)
  1872. def xtrim(self, name, maxlen, approximate=True):
  1873. """
  1874. Trims old messages from a stream.
  1875. name: name of the stream.
  1876. maxlen: truncate old stream messages beyond this size
  1877. approximate: actual stream length may be slightly more than maxlen
  1878. """
  1879. pieces = [Token.get_token('MAXLEN')]
  1880. if approximate:
  1881. pieces.append(Token.get_token('~'))
  1882. pieces.append(maxlen)
  1883. return self.execute_command('XTRIM', name, *pieces)
  1884. # SORTED SET COMMANDS
def zadd(self, name, mapping, nx=False, xx=False, ch=False, incr=False):
    """
    Set any number of element-name, score pairs to the key ``name``. Pairs
    are specified as a dict of element-names keys to score values.

    ``nx`` forces ZADD to only create new elements and not to update
    scores for elements that already exist.

    ``xx`` forces ZADD to only update scores of elements that already
    exist. New elements will not be added.

    ``ch`` modifies the return value to be the numbers of elements changed.
    Changed elements include new elements that were added and elements
    whose scores changed.

    ``incr`` modifies ZADD to behave like ZINCRBY. In this mode only a
    single element/score pair can be specified and the score is the amount
    the existing score will be incremented by. When using this mode the
    return value of ZADD will be the new score of the element.

    The return value of ZADD varies based on the mode specified. With no
    options, ZADD returns the number of new elements added to the sorted
    set.
    """
    # Validate argument combinations before building the command.
    if not mapping:
        raise DataError("ZADD requires at least one element/score pair")
    if nx and xx:
        raise DataError("ZADD allows either 'nx' or 'xx', not both")
    if incr and len(mapping) != 1:
        raise DataError("ZADD option 'incr' only works when passing a "
                        "single element/score pair")
    pieces = []
    options = {}
    if nx:
        pieces.append(Token.get_token('NX'))
    if xx:
        pieces.append(Token.get_token('XX'))
    if ch:
        pieces.append(Token.get_token('CH'))
    if incr:
        pieces.append(Token.get_token('INCR'))
        # Tells the response parser to return the reply as a score.
        options['as_score'] = True
    # The wire format is score-then-member for each pair, so the dict's
    # (member, score) items are appended value-first.
    for pair in iteritems(mapping):
        pieces.append(pair[1])
        pieces.append(pair[0])
    return self.execute_command('ZADD', name, *pieces, **options)
  1926. def zcard(self, name):
  1927. "Return the number of elements in the sorted set ``name``"
  1928. return self.execute_command('ZCARD', name)
  1929. def zcount(self, name, min, max):
  1930. """
  1931. Returns the number of elements in the sorted set at key ``name`` with
  1932. a score between ``min`` and ``max``.
  1933. """
  1934. return self.execute_command('ZCOUNT', name, min, max)
  1935. def zincrby(self, name, amount, value):
  1936. "Increment the score of ``value`` in sorted set ``name`` by ``amount``"
  1937. return self.execute_command('ZINCRBY', name, amount, value)
  1938. def zinterstore(self, dest, keys, aggregate=None):
  1939. """
  1940. Intersect multiple sorted sets specified by ``keys`` into
  1941. a new sorted set, ``dest``. Scores in the destination will be
  1942. aggregated based on the ``aggregate``, or SUM if none is provided.
  1943. """
  1944. return self._zaggregate('ZINTERSTORE', dest, keys, aggregate)
  1945. def zlexcount(self, name, min, max):
  1946. """
  1947. Return the number of items in the sorted set ``name`` between the
  1948. lexicographical range ``min`` and ``max``.
  1949. """
  1950. return self.execute_command('ZLEXCOUNT', name, min, max)
  1951. def zpopmax(self, name, count=None):
  1952. """
  1953. Remove and return up to ``count`` members with the highest scores
  1954. from the sorted set ``name``.
  1955. """
  1956. args = (count is not None) and [count] or []
  1957. options = {
  1958. 'withscores': True
  1959. }
  1960. return self.execute_command('ZPOPMAX', name, *args, **options)
  1961. def zpopmin(self, name, count=None):
  1962. """
  1963. Remove and return up to ``count`` members with the lowest scores
  1964. from the sorted set ``name``.
  1965. """
  1966. args = (count is not None) and [count] or []
  1967. options = {
  1968. 'withscores': True
  1969. }
  1970. return self.execute_command('ZPOPMIN', name, *args, **options)
  1971. def bzpopmax(self, keys, timeout=0):
  1972. """
  1973. ZPOPMAX a value off of the first non-empty sorted set
  1974. named in the ``keys`` list.
  1975. If none of the sorted sets in ``keys`` has a value to ZPOPMAX,
  1976. then block for ``timeout`` seconds, or until a member gets added
  1977. to one of the sorted sets.
  1978. If timeout is 0, then block indefinitely.
  1979. """
  1980. if timeout is None:
  1981. timeout = 0
  1982. keys = list_or_args(keys, None)
  1983. keys.append(timeout)
  1984. return self.execute_command('BZPOPMAX', *keys)
  1985. def bzpopmin(self, keys, timeout=0):
  1986. """
  1987. ZPOPMIN a value off of the first non-empty sorted set
  1988. named in the ``keys`` list.
  1989. If none of the sorted sets in ``keys`` has a value to ZPOPMIN,
  1990. then block for ``timeout`` seconds, or until a member gets added
  1991. to one of the sorted sets.
  1992. If timeout is 0, then block indefinitely.
  1993. """
  1994. if timeout is None:
  1995. timeout = 0
  1996. keys = list_or_args(keys, None)
  1997. keys.append(timeout)
  1998. return self.execute_command('BZPOPMIN', *keys)
  1999. def zrange(self, name, start, end, desc=False, withscores=False,
  2000. score_cast_func=float):
  2001. """
  2002. Return a range of values from sorted set ``name`` between
  2003. ``start`` and ``end`` sorted in ascending order.
  2004. ``start`` and ``end`` can be negative, indicating the end of the range.
  2005. ``desc`` a boolean indicating whether to sort the results descendingly
  2006. ``withscores`` indicates to return the scores along with the values.
  2007. The return type is a list of (value, score) pairs
  2008. ``score_cast_func`` a callable used to cast the score return value
  2009. """
  2010. if desc:
  2011. return self.zrevrange(name, start, end, withscores,
  2012. score_cast_func)
  2013. pieces = ['ZRANGE', name, start, end]
  2014. if withscores:
  2015. pieces.append(Token.get_token('WITHSCORES'))
  2016. options = {
  2017. 'withscores': withscores,
  2018. 'score_cast_func': score_cast_func
  2019. }
  2020. return self.execute_command(*pieces, **options)
  2021. def zrangebylex(self, name, min, max, start=None, num=None):
  2022. """
  2023. Return the lexicographical range of values from sorted set ``name``
  2024. between ``min`` and ``max``.
  2025. If ``start`` and ``num`` are specified, then return a slice of the
  2026. range.
  2027. """
  2028. if (start is not None and num is None) or \
  2029. (num is not None and start is None):
  2030. raise DataError("``start`` and ``num`` must both be specified")
  2031. pieces = ['ZRANGEBYLEX', name, min, max]
  2032. if start is not None and num is not None:
  2033. pieces.extend([Token.get_token('LIMIT'), start, num])
  2034. return self.execute_command(*pieces)
  2035. def zrevrangebylex(self, name, max, min, start=None, num=None):
  2036. """
  2037. Return the reversed lexicographical range of values from sorted set
  2038. ``name`` between ``max`` and ``min``.
  2039. If ``start`` and ``num`` are specified, then return a slice of the
  2040. range.
  2041. """
  2042. if (start is not None and num is None) or \
  2043. (num is not None and start is None):
  2044. raise DataError("``start`` and ``num`` must both be specified")
  2045. pieces = ['ZREVRANGEBYLEX', name, max, min]
  2046. if start is not None and num is not None:
  2047. pieces.extend([Token.get_token('LIMIT'), start, num])
  2048. return self.execute_command(*pieces)
  2049. def zrangebyscore(self, name, min, max, start=None, num=None,
  2050. withscores=False, score_cast_func=float):
  2051. """
  2052. Return a range of values from the sorted set ``name`` with scores
  2053. between ``min`` and ``max``.
  2054. If ``start`` and ``num`` are specified, then return a slice
  2055. of the range.
  2056. ``withscores`` indicates to return the scores along with the values.
  2057. The return type is a list of (value, score) pairs
  2058. `score_cast_func`` a callable used to cast the score return value
  2059. """
  2060. if (start is not None and num is None) or \
  2061. (num is not None and start is None):
  2062. raise DataError("``start`` and ``num`` must both be specified")
  2063. pieces = ['ZRANGEBYSCORE', name, min, max]
  2064. if start is not None and num is not None:
  2065. pieces.extend([Token.get_token('LIMIT'), start, num])
  2066. if withscores:
  2067. pieces.append(Token.get_token('WITHSCORES'))
  2068. options = {
  2069. 'withscores': withscores,
  2070. 'score_cast_func': score_cast_func
  2071. }
  2072. return self.execute_command(*pieces, **options)
  2073. def zrank(self, name, value):
  2074. """
  2075. Returns a 0-based value indicating the rank of ``value`` in sorted set
  2076. ``name``
  2077. """
  2078. return self.execute_command('ZRANK', name, value)
  2079. def zrem(self, name, *values):
  2080. "Remove member ``values`` from sorted set ``name``"
  2081. return self.execute_command('ZREM', name, *values)
  2082. def zremrangebylex(self, name, min, max):
  2083. """
  2084. Remove all elements in the sorted set ``name`` between the
  2085. lexicographical range specified by ``min`` and ``max``.
  2086. Returns the number of elements removed.
  2087. """
  2088. return self.execute_command('ZREMRANGEBYLEX', name, min, max)
  2089. def zremrangebyrank(self, name, min, max):
  2090. """
  2091. Remove all elements in the sorted set ``name`` with ranks between
  2092. ``min`` and ``max``. Values are 0-based, ordered from smallest score
  2093. to largest. Values can be negative indicating the highest scores.
  2094. Returns the number of elements removed
  2095. """
  2096. return self.execute_command('ZREMRANGEBYRANK', name, min, max)
  2097. def zremrangebyscore(self, name, min, max):
  2098. """
  2099. Remove all elements in the sorted set ``name`` with scores
  2100. between ``min`` and ``max``. Returns the number of elements removed.
  2101. """
  2102. return self.execute_command('ZREMRANGEBYSCORE', name, min, max)
  2103. def zrevrange(self, name, start, end, withscores=False,
  2104. score_cast_func=float):
  2105. """
  2106. Return a range of values from sorted set ``name`` between
  2107. ``start`` and ``end`` sorted in descending order.
  2108. ``start`` and ``end`` can be negative, indicating the end of the range.
  2109. ``withscores`` indicates to return the scores along with the values
  2110. The return type is a list of (value, score) pairs
  2111. ``score_cast_func`` a callable used to cast the score return value
  2112. """
  2113. pieces = ['ZREVRANGE', name, start, end]
  2114. if withscores:
  2115. pieces.append(Token.get_token('WITHSCORES'))
  2116. options = {
  2117. 'withscores': withscores,
  2118. 'score_cast_func': score_cast_func
  2119. }
  2120. return self.execute_command(*pieces, **options)
  2121. def zrevrangebyscore(self, name, max, min, start=None, num=None,
  2122. withscores=False, score_cast_func=float):
  2123. """
  2124. Return a range of values from the sorted set ``name`` with scores
  2125. between ``min`` and ``max`` in descending order.
  2126. If ``start`` and ``num`` are specified, then return a slice
  2127. of the range.
  2128. ``withscores`` indicates to return the scores along with the values.
  2129. The return type is a list of (value, score) pairs
  2130. ``score_cast_func`` a callable used to cast the score return value
  2131. """
  2132. if (start is not None and num is None) or \
  2133. (num is not None and start is None):
  2134. raise DataError("``start`` and ``num`` must both be specified")
  2135. pieces = ['ZREVRANGEBYSCORE', name, max, min]
  2136. if start is not None and num is not None:
  2137. pieces.extend([Token.get_token('LIMIT'), start, num])
  2138. if withscores:
  2139. pieces.append(Token.get_token('WITHSCORES'))
  2140. options = {
  2141. 'withscores': withscores,
  2142. 'score_cast_func': score_cast_func
  2143. }
  2144. return self.execute_command(*pieces, **options)
  2145. def zrevrank(self, name, value):
  2146. """
  2147. Returns a 0-based value indicating the descending rank of
  2148. ``value`` in sorted set ``name``
  2149. """
  2150. return self.execute_command('ZREVRANK', name, value)
  2151. def zscore(self, name, value):
  2152. "Return the score of element ``value`` in sorted set ``name``"
  2153. return self.execute_command('ZSCORE', name, value)
  2154. def zunionstore(self, dest, keys, aggregate=None):
  2155. """
  2156. Union multiple sorted sets specified by ``keys`` into
  2157. a new sorted set, ``dest``. Scores in the destination will be
  2158. aggregated based on the ``aggregate``, or SUM if none is provided.
  2159. """
  2160. return self._zaggregate('ZUNIONSTORE', dest, keys, aggregate)
  2161. def _zaggregate(self, command, dest, keys, aggregate=None):
  2162. pieces = [command, dest, len(keys)]
  2163. if isinstance(keys, dict):
  2164. keys, weights = iterkeys(keys), itervalues(keys)
  2165. else:
  2166. weights = None
  2167. pieces.extend(keys)
  2168. if weights:
  2169. pieces.append(Token.get_token('WEIGHTS'))
  2170. pieces.extend(weights)
  2171. if aggregate:
  2172. pieces.append(Token.get_token('AGGREGATE'))
  2173. pieces.append(aggregate)
  2174. return self.execute_command(*pieces)
  2175. # HYPERLOGLOG COMMANDS
  2176. def pfadd(self, name, *values):
  2177. "Adds the specified elements to the specified HyperLogLog."
  2178. return self.execute_command('PFADD', name, *values)
  2179. def pfcount(self, *sources):
  2180. """
  2181. Return the approximated cardinality of
  2182. the set observed by the HyperLogLog at key(s).
  2183. """
  2184. return self.execute_command('PFCOUNT', *sources)
  2185. def pfmerge(self, dest, *sources):
  2186. "Merge N different HyperLogLogs into a single one."
  2187. return self.execute_command('PFMERGE', dest, *sources)
  2188. # HASH COMMANDS
  2189. def hdel(self, name, *keys):
  2190. "Delete ``keys`` from hash ``name``"
  2191. return self.execute_command('HDEL', name, *keys)
  2192. def hexists(self, name, key):
  2193. "Returns a boolean indicating if ``key`` exists within hash ``name``"
  2194. return self.execute_command('HEXISTS', name, key)
  2195. def hget(self, name, key):
  2196. "Return the value of ``key`` within the hash ``name``"
  2197. return self.execute_command('HGET', name, key)
  2198. def hgetall(self, name):
  2199. "Return a Python dict of the hash's name/value pairs"
  2200. return self.execute_command('HGETALL', name)
  2201. def hincrby(self, name, key, amount=1):
  2202. "Increment the value of ``key`` in hash ``name`` by ``amount``"
  2203. return self.execute_command('HINCRBY', name, key, amount)
  2204. def hincrbyfloat(self, name, key, amount=1.0):
  2205. """
  2206. Increment the value of ``key`` in hash ``name`` by floating ``amount``
  2207. """
  2208. return self.execute_command('HINCRBYFLOAT', name, key, amount)
  2209. def hkeys(self, name):
  2210. "Return the list of keys within hash ``name``"
  2211. return self.execute_command('HKEYS', name)
  2212. def hlen(self, name):
  2213. "Return the number of elements in hash ``name``"
  2214. return self.execute_command('HLEN', name)
  2215. def hset(self, name, key, value):
  2216. """
  2217. Set ``key`` to ``value`` within hash ``name``
  2218. Returns 1 if HSET created a new field, otherwise 0
  2219. """
  2220. return self.execute_command('HSET', name, key, value)
  2221. def hsetnx(self, name, key, value):
  2222. """
  2223. Set ``key`` to ``value`` within hash ``name`` if ``key`` does not
  2224. exist. Returns 1 if HSETNX created a field, otherwise 0.
  2225. """
  2226. return self.execute_command('HSETNX', name, key, value)
  2227. def hmset(self, name, mapping):
  2228. """
  2229. Set key to value within hash ``name`` for each corresponding
  2230. key and value from the ``mapping`` dict.
  2231. """
  2232. if not mapping:
  2233. raise DataError("'hmset' with 'mapping' of length 0")
  2234. items = []
  2235. for pair in iteritems(mapping):
  2236. items.extend(pair)
  2237. return self.execute_command('HMSET', name, *items)
  2238. def hmget(self, name, keys, *args):
  2239. "Returns a list of values ordered identically to ``keys``"
  2240. args = list_or_args(keys, args)
  2241. return self.execute_command('HMGET', name, *args)
  2242. def hvals(self, name):
  2243. "Return the list of values within hash ``name``"
  2244. return self.execute_command('HVALS', name)
  2245. def hstrlen(self, name, key):
  2246. """
  2247. Return the number of bytes stored in the value of ``key``
  2248. within hash ``name``
  2249. """
  2250. return self.execute_command('HSTRLEN', name, key)
  2251. def publish(self, channel, message):
  2252. """
  2253. Publish ``message`` on ``channel``.
  2254. Returns the number of subscribers the message was delivered to.
  2255. """
  2256. return self.execute_command('PUBLISH', channel, message)
  2257. def pubsub_channels(self, pattern='*'):
  2258. """
  2259. Return a list of channels that have at least one subscriber
  2260. """
  2261. return self.execute_command('PUBSUB CHANNELS', pattern)
  2262. def pubsub_numpat(self):
  2263. """
  2264. Returns the number of subscriptions to patterns
  2265. """
  2266. return self.execute_command('PUBSUB NUMPAT')
  2267. def pubsub_numsub(self, *args):
  2268. """
  2269. Return a list of (channel, number of subscribers) tuples
  2270. for each channel given in ``*args``
  2271. """
  2272. return self.execute_command('PUBSUB NUMSUB', *args)
  2273. def cluster(self, cluster_arg, *args):
  2274. return self.execute_command('CLUSTER %s' % cluster_arg.upper(), *args)
  2275. def eval(self, script, numkeys, *keys_and_args):
  2276. """
  2277. Execute the Lua ``script``, specifying the ``numkeys`` the script
  2278. will touch and the key names and argument values in ``keys_and_args``.
  2279. Returns the result of the script.
  2280. In practice, use the object returned by ``register_script``. This
  2281. function exists purely for Redis API completion.
  2282. """
  2283. return self.execute_command('EVAL', script, numkeys, *keys_and_args)
  2284. def evalsha(self, sha, numkeys, *keys_and_args):
  2285. """
  2286. Use the ``sha`` to execute a Lua script already registered via EVAL
  2287. or SCRIPT LOAD. Specify the ``numkeys`` the script will touch and the
  2288. key names and argument values in ``keys_and_args``. Returns the result
  2289. of the script.
  2290. In practice, use the object returned by ``register_script``. This
  2291. function exists purely for Redis API completion.
  2292. """
  2293. return self.execute_command('EVALSHA', sha, numkeys, *keys_and_args)
  2294. def script_exists(self, *args):
  2295. """
  2296. Check if a script exists in the script cache by specifying the SHAs of
  2297. each script as ``args``. Returns a list of boolean values indicating if
  2298. if each already script exists in the cache.
  2299. """
  2300. return self.execute_command('SCRIPT EXISTS', *args)
  2301. def script_flush(self):
  2302. "Flush all scripts from the script cache"
  2303. return self.execute_command('SCRIPT FLUSH')
  2304. def script_kill(self):
  2305. "Kill the currently executing Lua script"
  2306. return self.execute_command('SCRIPT KILL')
  2307. def script_load(self, script):
  2308. "Load a Lua ``script`` into the script cache. Returns the SHA."
  2309. return self.execute_command('SCRIPT LOAD', script)
  2310. def register_script(self, script):
  2311. """
  2312. Register a Lua ``script`` specifying the ``keys`` it will touch.
  2313. Returns a Script object that is callable and hides the complexity of
  2314. deal with scripts, keys, and shas. This is the preferred way to work
  2315. with Lua scripts.
  2316. """
  2317. return Script(self, script)
  2318. # GEO COMMANDS
  2319. def geoadd(self, name, *values):
  2320. """
  2321. Add the specified geospatial items to the specified key identified
  2322. by the ``name`` argument. The Geospatial items are given as ordered
  2323. members of the ``values`` argument, each item or place is formed by
  2324. the triad longitude, latitude and name.
  2325. """
  2326. if len(values) % 3 != 0:
  2327. raise DataError("GEOADD requires places with lon, lat and name"
  2328. " values")
  2329. return self.execute_command('GEOADD', name, *values)
  2330. def geodist(self, name, place1, place2, unit=None):
  2331. """
  2332. Return the distance between ``place1`` and ``place2`` members of the
  2333. ``name`` key.
  2334. The units must be one of the following : m, km mi, ft. By default
  2335. meters are used.
  2336. """
  2337. pieces = [name, place1, place2]
  2338. if unit and unit not in ('m', 'km', 'mi', 'ft'):
  2339. raise DataError("GEODIST invalid unit")
  2340. elif unit:
  2341. pieces.append(unit)
  2342. return self.execute_command('GEODIST', *pieces)
  2343. def geohash(self, name, *values):
  2344. """
  2345. Return the geo hash string for each item of ``values`` members of
  2346. the specified key identified by the ``name`` argument.
  2347. """
  2348. return self.execute_command('GEOHASH', name, *values)
  2349. def geopos(self, name, *values):
  2350. """
  2351. Return the positions of each item of ``values`` as members of
  2352. the specified key identified by the ``name`` argument. Each position
  2353. is represented by the pairs lon and lat.
  2354. """
  2355. return self.execute_command('GEOPOS', name, *values)
  2356. def georadius(self, name, longitude, latitude, radius, unit=None,
  2357. withdist=False, withcoord=False, withhash=False, count=None,
  2358. sort=None, store=None, store_dist=None):
  2359. """
  2360. Return the members of the specified key identified by the
  2361. ``name`` argument which are within the borders of the area specified
  2362. with the ``latitude`` and ``longitude`` location and the maximum
  2363. distance from the center specified by the ``radius`` value.
  2364. The units must be one of the following : m, km mi, ft. By default
  2365. ``withdist`` indicates to return the distances of each place.
  2366. ``withcoord`` indicates to return the latitude and longitude of
  2367. each place.
  2368. ``withhash`` indicates to return the geohash string of each place.
  2369. ``count`` indicates to return the number of elements up to N.
  2370. ``sort`` indicates to return the places in a sorted way, ASC for
  2371. nearest to fairest and DESC for fairest to nearest.
  2372. ``store`` indicates to save the places names in a sorted set named
  2373. with a specific key, each element of the destination sorted set is
  2374. populated with the score got from the original geo sorted set.
  2375. ``store_dist`` indicates to save the places names in a sorted set
  2376. named with a specific key, instead of ``store`` the sorted set
  2377. destination score is set with the distance.
  2378. """
  2379. return self._georadiusgeneric('GEORADIUS',
  2380. name, longitude, latitude, radius,
  2381. unit=unit, withdist=withdist,
  2382. withcoord=withcoord, withhash=withhash,
  2383. count=count, sort=sort, store=store,
  2384. store_dist=store_dist)
  2385. def georadiusbymember(self, name, member, radius, unit=None,
  2386. withdist=False, withcoord=False, withhash=False,
  2387. count=None, sort=None, store=None, store_dist=None):
  2388. """
  2389. This command is exactly like ``georadius`` with the sole difference
  2390. that instead of taking, as the center of the area to query, a longitude
  2391. and latitude value, it takes the name of a member already existing
  2392. inside the geospatial index represented by the sorted set.
  2393. """
  2394. return self._georadiusgeneric('GEORADIUSBYMEMBER',
  2395. name, member, radius, unit=unit,
  2396. withdist=withdist, withcoord=withcoord,
  2397. withhash=withhash, count=count,
  2398. sort=sort, store=store,
  2399. store_dist=store_dist)
def _georadiusgeneric(self, command, *args, **kwargs):
    # Shared builder for GEORADIUS and GEORADIUSBYMEMBER.
    # ``args`` holds the positional command arguments (key plus center
    # and radius); ``kwargs`` must contain every optional flag: unit,
    # withdist, withcoord, withhash, count, sort, store, store_dist.
    pieces = list(args)
    # unit: validate when given, otherwise default to meters
    if kwargs['unit'] and kwargs['unit'] not in ('m', 'km', 'mi', 'ft'):
        raise DataError("GEORADIUS invalid unit")
    elif kwargs['unit']:
        pieces.append(kwargs['unit'])
    else:
        pieces.append('m',)
    # boolean flags become bare WITHDIST/WITHCOORD/WITHHASH tokens
    for token in ('withdist', 'withcoord', 'withhash'):
        if kwargs[token]:
            pieces.append(Token(token.upper()))
    if kwargs['count']:
        pieces.extend([Token('COUNT'), kwargs['count']])
    # sort order must be ASC or DESC when provided
    if kwargs['sort'] and kwargs['sort'] not in ('ASC', 'DESC'):
        raise DataError("GEORADIUS invalid sort")
    elif kwargs['sort']:
        pieces.append(Token(kwargs['sort']))
    # STORE and STOREDIST are mutually exclusive destinations
    if kwargs['store'] and kwargs['store_dist']:
        raise DataError("GEORADIUS store and store_dist cant be set"
                        " together")
    if kwargs['store']:
        pieces.extend([Token('STORE'), kwargs['store']])
    if kwargs['store_dist']:
        pieces.extend([Token('STOREDIST'), kwargs['store_dist']])
    # NOTE(review): the raw kwargs are forwarded to execute_command as
    # response options -- presumably ignored downstream; confirm before
    # changing.
    return self.execute_command(command, *pieces, **kwargs)
# Backwards-compatibility alias: ``StrictRedis`` is the same class as
# ``Redis``.
StrictRedis = Redis
class PubSub(object):
    """
    PubSub provides publish, subscribe and listen support to Redis channels.

    After subscribing to one or more channels, the listen() method will block
    until a message arrives on one of the subscribed channels. That message
    will be returned and it's safe to start listening again.
    """
    # message types that carry published data and may have user handlers
    PUBLISH_MESSAGE_TYPES = ('message', 'pmessage')
    # message types confirming a channel/pattern was dropped
    UNSUBSCRIBE_MESSAGE_TYPES = ('unsubscribe', 'punsubscribe')

    def __init__(self, connection_pool, shard_hint=None,
                 ignore_subscribe_messages=False):
        self.connection_pool = connection_pool
        self.shard_hint = shard_hint
        self.ignore_subscribe_messages = ignore_subscribe_messages
        self.connection = None
        # we need to know the encoding options for this connection in order
        # to lookup channel and pattern names for callback handlers.
        self.encoder = self.connection_pool.get_encoder()
        self.reset()

    def __del__(self):
        try:
            # if this object went out of scope prior to shutting down
            # subscriptions, close the connection manually before
            # returning it to the connection pool
            self.reset()
        except Exception:
            pass

    def reset(self):
        # Release the pooled connection (if any) and forget all
        # channel/pattern subscription state.
        if self.connection:
            self.connection.disconnect()
            self.connection.clear_connect_callbacks()
            self.connection_pool.release(self.connection)
            self.connection = None
        self.channels = {}
        self.patterns = {}

    def close(self):
        "Shut down the pubsub connection and clear subscription state."
        self.reset()

    def on_connect(self, connection):
        "Re-subscribe to any channels and patterns previously subscribed to"
        # NOTE: for python3, we can't pass bytestrings as keyword arguments
        # so we need to decode channel/pattern names back to unicode strings
        # before passing them to [p]subscribe.
        if self.channels:
            channels = {}
            for k, v in iteritems(self.channels):
                channels[self.encoder.decode(k, force=True)] = v
            self.subscribe(**channels)
        if self.patterns:
            patterns = {}
            for k, v in iteritems(self.patterns):
                patterns[self.encoder.decode(k, force=True)] = v
            self.psubscribe(**patterns)

    @property
    def subscribed(self):
        "Indicates if there are subscriptions to any channels or patterns"
        return bool(self.channels or self.patterns)

    def execute_command(self, *args, **kwargs):
        "Execute a publish/subscribe command"
        # NOTE: don't parse the response in this function -- it could pull a
        # legitimate message off the stack if the connection is already
        # subscribed to one or more channels
        if self.connection is None:
            self.connection = self.connection_pool.get_connection(
                'pubsub',
                self.shard_hint
            )
            # register a callback that re-subscribes to any channels we
            # were listening to when we were disconnected
            self.connection.register_connect_callback(self.on_connect)
        connection = self.connection
        self._execute(connection, connection.send_command, *args)

    def _execute(self, connection, command, *args):
        # Run ``command`` and transparently retry once after a connection
        # failure; resubscription happens through the on_connect callback.
        try:
            return command(*args)
        except (ConnectionError, TimeoutError) as e:
            connection.disconnect()
            if not connection.retry_on_timeout and isinstance(e, TimeoutError):
                raise
            # Connect manually here. If the Redis server is down, this will
            # fail and raise a ConnectionError as desired.
            connection.connect()
            # the ``on_connect`` callback should have been called by the
            # connection to resubscribe us to any channels and patterns we were
            # previously listening to
            return command(*args)

    def parse_response(self, block=True, timeout=0):
        "Parse the response from a publish/subscribe command"
        connection = self.connection
        if connection is None:
            raise RuntimeError(
                'pubsub connection not set: '
                'did you forget to call subscribe() or psubscribe()?')
        if not block and not connection.can_read(timeout=timeout):
            return None
        return self._execute(connection, connection.read_response)

    def _normalize_keys(self, data):
        """
        normalize channel/pattern names to be either bytes or strings
        based on whether responses are automatically decoded. this saves us
        from coercing the value for each message coming in.
        """
        encode = self.encoder.encode
        decode = self.encoder.decode
        return {decode(encode(k)): v for k, v in iteritems(data)}

    def psubscribe(self, *args, **kwargs):
        """
        Subscribe to channel patterns. Patterns supplied as keyword arguments
        expect a pattern name as the key and a callable as the value. A
        pattern's callable will be invoked automatically when a message is
        received on that pattern rather than producing a message via
        ``listen()``.
        """
        if args:
            args = list_or_args(args[0], args[1:])
        new_patterns = dict.fromkeys(args)
        new_patterns.update(kwargs)
        ret_val = self.execute_command('PSUBSCRIBE', *iterkeys(new_patterns))
        # update the patterns dict AFTER we send the command. we don't want to
        # subscribe twice to these patterns, once for the command and again
        # for the reconnection.
        self.patterns.update(self._normalize_keys(new_patterns))
        return ret_val

    def punsubscribe(self, *args):
        """
        Unsubscribe from the supplied patterns. If empty, unsubscribe from
        all patterns.
        """
        if args:
            args = list_or_args(args[0], args[1:])
        return self.execute_command('PUNSUBSCRIBE', *args)

    def subscribe(self, *args, **kwargs):
        """
        Subscribe to channels. Channels supplied as keyword arguments expect
        a channel name as the key and a callable as the value. A channel's
        callable will be invoked automatically when a message is received on
        that channel rather than producing a message via ``listen()`` or
        ``get_message()``.
        """
        if args:
            args = list_or_args(args[0], args[1:])
        new_channels = dict.fromkeys(args)
        new_channels.update(kwargs)
        ret_val = self.execute_command('SUBSCRIBE', *iterkeys(new_channels))
        # update the channels dict AFTER we send the command. we don't want to
        # subscribe twice to these channels, once for the command and again
        # for the reconnection.
        self.channels.update(self._normalize_keys(new_channels))
        return ret_val

    def unsubscribe(self, *args):
        """
        Unsubscribe from the supplied channels. If empty, unsubscribe from
        all channels
        """
        if args:
            args = list_or_args(args[0], args[1:])
        return self.execute_command('UNSUBSCRIBE', *args)

    def listen(self):
        "Listen for messages on channels this client has been subscribed to"
        while self.subscribed:
            response = self.handle_message(self.parse_response(block=True))
            if response is not None:
                yield response

    def get_message(self, ignore_subscribe_messages=False, timeout=0):
        """
        Get the next message if one is available, otherwise None.

        If timeout is specified, the system will wait for `timeout` seconds
        before returning. Timeout should be specified as a floating point
        number.
        """
        response = self.parse_response(block=False, timeout=timeout)
        if response:
            return self.handle_message(response, ignore_subscribe_messages)
        return None

    def ping(self, message=None):
        """
        Ping the Redis server
        """
        message = '' if message is None else message
        return self.execute_command('PING', message)

    def handle_message(self, response, ignore_subscribe_messages=False):
        """
        Parses a pub/sub message. If the channel or pattern was subscribed to
        with a message handler, the handler is invoked instead of a parsed
        message being returned.
        """
        message_type = nativestr(response[0])
        if message_type == 'pmessage':
            message = {
                'type': message_type,
                'pattern': response[1],
                'channel': response[2],
                'data': response[3]
            }
        elif message_type == 'pong':
            message = {
                'type': message_type,
                'pattern': None,
                'channel': None,
                'data': response[1]
            }
        else:
            message = {
                'type': message_type,
                'pattern': None,
                'channel': response[1],
                'data': response[2]
            }

        # if this is an unsubscribe message, remove it from memory
        if message_type in self.UNSUBSCRIBE_MESSAGE_TYPES:
            subscribed_dict = None
            if message_type == 'punsubscribe':
                subscribed_dict = self.patterns
            else:
                subscribed_dict = self.channels
            try:
                del subscribed_dict[message['channel']]
            except KeyError:
                pass

        if message_type in self.PUBLISH_MESSAGE_TYPES:
            # if there's a message handler, invoke it
            handler = None
            if message_type == 'pmessage':
                handler = self.patterns.get(message['pattern'], None)
            else:
                handler = self.channels.get(message['channel'], None)
            if handler:
                handler(message)
                return None
        elif message_type != 'pong':
            # this is a subscribe/unsubscribe message. ignore if we don't
            # want them
            if ignore_subscribe_messages or self.ignore_subscribe_messages:
                return None

        return message

    def run_in_thread(self, sleep_time=0, daemon=False):
        # Start a PubSubWorkerThread that pumps messages through the
        # registered handlers. Every subscribed channel and pattern must
        # have a handler; otherwise a PubSubError is raised.
        for channel, handler in iteritems(self.channels):
            if handler is None:
                raise PubSubError("Channel: '%s' has no handler registered" %
                                  channel)
        for pattern, handler in iteritems(self.patterns):
            if handler is None:
                raise PubSubError("Pattern: '%s' has no handler registered" %
                                  pattern)

        thread = PubSubWorkerThread(self, sleep_time, daemon=daemon)
        thread.start()
        return thread
  2672. class PubSubWorkerThread(threading.Thread):
  2673. def __init__(self, pubsub, sleep_time, daemon=False):
  2674. super(PubSubWorkerThread, self).__init__()
  2675. self.daemon = daemon
  2676. self.pubsub = pubsub
  2677. self.sleep_time = sleep_time
  2678. self._running = False
  2679. def run(self):
  2680. if self._running:
  2681. return
  2682. self._running = True
  2683. pubsub = self.pubsub
  2684. sleep_time = self.sleep_time
  2685. while pubsub.subscribed:
  2686. pubsub.get_message(ignore_subscribe_messages=True,
  2687. timeout=sleep_time)
  2688. pubsub.close()
  2689. self._running = False
  2690. def stop(self):
  2691. # stopping simply unsubscribes from all channels and patterns.
  2692. # the unsubscribe responses that are generated will short circuit
  2693. # the loop in run(), calling pubsub.close() to clean up the connection
  2694. self.pubsub.unsubscribe()
  2695. self.pubsub.punsubscribe()
class Pipeline(Redis):
    """
    Pipelines provide a way to transmit multiple commands to the Redis server
    in one transmission. This is convenient for batch processing, such as
    saving all the values in a list to Redis.

    All commands executed within a pipeline are wrapped with MULTI and EXEC
    calls when ``transaction`` is enabled (the default) or after an explicit
    ``multi()``. This guarantees all commands executed in the pipeline will
    be executed atomically.

    Any command raising an exception does *not* halt the execution of
    subsequent commands in the pipeline. Instead, the exception is caught
    and its instance is placed into the response list returned by execute().
    Code iterating over the response list should be able to deal with an
    instance of an exception as a potential value. In general, these will be
    ResponseError exceptions, such as those raised when issuing a command
    on a key of a different datatype.
    """

    # commands whose successful reply means the connection no longer has
    # any WATCHed keys
    UNWATCH_COMMANDS = {'DISCARD', 'EXEC', 'UNWATCH'}

    def __init__(self, connection_pool, response_callbacks, transaction,
                 shard_hint):
        self.connection_pool = connection_pool
        # lazily acquired on first immediate command or execute()
        self.connection = None
        self.response_callbacks = response_callbacks
        # when True, execute() wraps the buffered commands in MULTI/EXEC
        self.transaction = transaction
        self.shard_hint = shard_hint
        # True while the connection has outstanding WATCHes
        self.watching = False
        self.reset()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.reset()

    def __del__(self):
        try:
            self.reset()
        except Exception:
            pass

    def __len__(self):
        # number of buffered (not yet executed) commands
        return len(self.command_stack)

    def reset(self):
        """
        Discard all buffered commands and scripts, clear any WATCH state
        on the connection, and return the connection to the pool.
        """
        self.command_stack = []
        self.scripts = set()
        # make sure to reset the connection state in the event that we were
        # watching something
        if self.watching and self.connection:
            try:
                # call this manually since our unwatch or
                # immediate_execute_command methods can call reset()
                self.connection.send_command('UNWATCH')
                self.connection.read_response()
            except ConnectionError:
                # disconnect will also remove any previous WATCHes
                self.connection.disconnect()
        # clean up the other instance attributes
        self.watching = False
        self.explicit_transaction = False
        # we can safely return the connection to the pool here since we're
        # sure we're no longer WATCHing anything
        if self.connection:
            self.connection_pool.release(self.connection)
            self.connection = None

    def multi(self):
        """
        Start a transactional block of the pipeline after WATCH commands
        are issued. End the transactional block with `execute`.
        """
        if self.explicit_transaction:
            raise RedisError('Cannot issue nested calls to MULTI')
        if self.command_stack:
            raise RedisError('Commands without an initial WATCH have already '
                             'been issued')
        self.explicit_transaction = True

    def execute_command(self, *args, **kwargs):
        # before MULTI is called, WATCH itself and any command issued while
        # WATCHing must run immediately so the caller can inspect values;
        # everything else is buffered until execute()
        if (self.watching or args[0] == 'WATCH') and \
                not self.explicit_transaction:
            return self.immediate_execute_command(*args, **kwargs)
        return self.pipeline_execute_command(*args, **kwargs)

    def immediate_execute_command(self, *args, **options):
        """
        Execute a command immediately, but don't auto-retry on a
        ConnectionError if we're already WATCHing a variable. Used when
        issuing WATCH or subsequent commands retrieving their values but before
        MULTI is called.
        """
        command_name = args[0]
        conn = self.connection
        # if this is the first call, we need a connection
        if not conn:
            conn = self.connection_pool.get_connection(command_name,
                                                       self.shard_hint)
            self.connection = conn
        try:
            conn.send_command(*args)
            return self.parse_response(conn, command_name, **options)
        except (ConnectionError, TimeoutError) as e:
            conn.disconnect()
            # timeouts are only retried if the connection opted in
            if not conn.retry_on_timeout and isinstance(e, TimeoutError):
                raise
            # if we're not already watching, we can safely retry the command
            try:
                if not self.watching:
                    conn.send_command(*args)
                    return self.parse_response(conn, command_name, **options)
            except ConnectionError:
                # the retry failed so cleanup.
                conn.disconnect()
                self.reset()
                raise

    def pipeline_execute_command(self, *args, **options):
        """
        Stage a command to be executed when execute() is next called

        Returns the current Pipeline object back so commands can be
        chained together, such as:

        pipe = pipe.set('foo', 'bar').incr('baz').decr('bang')

        At some other point, you can then run: pipe.execute(),
        which will execute all commands queued in the pipe.
        """
        self.command_stack.append((args, options))
        return self

    def _execute_transaction(self, connection, commands, raise_on_error):
        # Send MULTI, the buffered commands, and EXEC as one packed request,
        # then read back each reply in order. Commands flagged with
        # EMPTY_RESPONSE are never sent; their canned response/error is
        # spliced into the result instead.
        cmds = chain([(('MULTI', ), {})], commands, [(('EXEC', ), {})])
        all_cmds = connection.pack_commands([args for args, options in cmds
                                             if EMPTY_RESPONSE not in options])
        connection.send_packed_command(all_cmds)
        errors = []

        # parse off the response for MULTI
        # NOTE: we need to handle ResponseErrors here and continue
        # so that we read all the additional command messages from
        # the socket
        try:
            self.parse_response(connection, '_')
        except ResponseError:
            errors.append((0, sys.exc_info()[1]))

        # and all the other commands
        for i, command in enumerate(commands):
            if EMPTY_RESPONSE in command[1]:
                errors.append((i, command[1][EMPTY_RESPONSE]))
            else:
                try:
                    self.parse_response(connection, '_')
                except ResponseError:
                    ex = sys.exc_info()[1]
                    self.annotate_exception(ex, i + 1, command[0])
                    errors.append((i, ex))

        # parse the EXEC.
        try:
            response = self.parse_response(connection, '_')
        except ExecAbortError:
            if self.explicit_transaction:
                self.immediate_execute_command('DISCARD')
            if errors:
                raise errors[0][1]
            raise sys.exc_info()[1]

        # a null EXEC reply means a WATCHed key changed between WATCH and
        # EXEC, so the transaction was not run
        if response is None:
            raise WatchError("Watched variable changed.")

        # put any parse errors into the response
        for i, e in errors:
            response.insert(i, e)

        if len(response) != len(commands):
            self.connection.disconnect()
            raise ResponseError("Wrong number of response items from "
                                "pipeline execution")

        # find any errors in the response and raise if necessary
        if raise_on_error:
            self.raise_first_error(commands, response)

        # We have to run response callbacks manually
        data = []
        for r, cmd in izip(response, commands):
            if not isinstance(r, Exception):
                args, options = cmd
                command_name = args[0]
                if command_name in self.response_callbacks:
                    r = self.response_callbacks[command_name](r, **options)
            data.append(r)
        return data

    def _execute_pipeline(self, connection, commands, raise_on_error):
        # build up all commands into a single request to increase network perf
        all_cmds = connection.pack_commands([args for args, _ in commands])
        connection.send_packed_command(all_cmds)

        # read one reply per command; a ResponseError becomes the item's
        # value rather than aborting the remaining reads
        response = []
        for args, options in commands:
            try:
                response.append(
                    self.parse_response(connection, args[0], **options))
            except ResponseError:
                response.append(sys.exc_info()[1])
        if raise_on_error:
            self.raise_first_error(commands, response)
        return response

    def raise_first_error(self, commands, response):
        # raise the first ResponseError found, annotated with its position
        # and command text within the pipeline
        for i, r in enumerate(response):
            if isinstance(r, ResponseError):
                self.annotate_exception(r, i + 1, commands[i][0])
                raise r

    def annotate_exception(self, exception, number, command):
        # prefix the exception message with the 1-based command number and
        # the command that caused it, preserving any extra args
        cmd = ' '.join(imap(safe_unicode, command))
        msg = 'Command # %d (%s) of pipeline caused error: %s' % (
            number, cmd, safe_unicode(exception.args[0]))
        exception.args = (msg,) + exception.args[1:]

    def parse_response(self, connection, command_name, **options):
        # delegate to Redis, but track WATCH/UNWATCH state transitions so
        # reset() knows whether an explicit UNWATCH is needed
        result = Redis.parse_response(
            self, connection, command_name, **options)
        if command_name in self.UNWATCH_COMMANDS:
            self.watching = False
        elif command_name == 'WATCH':
            self.watching = True
        return result

    def load_scripts(self):
        # make sure all scripts that are about to be run on this pipeline exist
        scripts = list(self.scripts)
        immediate = self.immediate_execute_command
        shas = [s.sha for s in scripts]
        # we can't use the normal script_* methods because they would just
        # get buffered in the pipeline.
        exists = immediate('SCRIPT EXISTS', *shas)
        if not all(exists):
            for s, exist in izip(scripts, exists):
                if not exist:
                    s.sha = immediate('SCRIPT LOAD', s.script)

    def execute(self, raise_on_error=True):
        "Execute all the commands in the current pipeline"
        stack = self.command_stack
        if not stack:
            return []
        if self.scripts:
            self.load_scripts()
        if self.transaction or self.explicit_transaction:
            execute = self._execute_transaction
        else:
            execute = self._execute_pipeline

        conn = self.connection
        if not conn:
            conn = self.connection_pool.get_connection('MULTI',
                                                       self.shard_hint)
            # assign to self.connection so reset() releases the connection
            # back to the pool after we're done
            self.connection = conn

        try:
            return execute(conn, stack, raise_on_error)
        except (ConnectionError, TimeoutError) as e:
            conn.disconnect()
            if not conn.retry_on_timeout and isinstance(e, TimeoutError):
                raise
            # if we were watching a variable, the watch is no longer valid
            # since this connection has died. raise a WatchError, which
            # indicates the user should retry his transaction. If this is more
            # than a temporary failure, the WATCH that the user next issues
            # will fail, propagating the real ConnectionError
            if self.watching:
                raise WatchError("A ConnectionError occured on while watching "
                                 "one or more keys")
            # otherwise, it's safe to retry since the transaction isn't
            # predicated on any state
            return execute(conn, stack, raise_on_error)
        finally:
            self.reset()

    def watch(self, *names):
        "Watches the values at keys ``names``"
        if self.explicit_transaction:
            raise RedisError('Cannot issue a WATCH after a MULTI')
        return self.execute_command('WATCH', *names)

    def unwatch(self):
        "Unwatches all previously specified keys"
        return self.watching and self.execute_command('UNWATCH') or True
  2958. class Script(object):
  2959. "An executable Lua script object returned by ``register_script``"
  2960. def __init__(self, registered_client, script):
  2961. self.registered_client = registered_client
  2962. self.script = script
  2963. # Precalculate and store the SHA1 hex digest of the script.
  2964. if isinstance(script, basestring):
  2965. # We need the encoding from the client in order to generate an
  2966. # accurate byte representation of the script
  2967. encoder = registered_client.connection_pool.get_encoder()
  2968. script = encoder.encode(script)
  2969. self.sha = hashlib.sha1(script).hexdigest()
  2970. def __call__(self, keys=[], args=[], client=None):
  2971. "Execute the script, passing any required ``args``"
  2972. if client is None:
  2973. client = self.registered_client
  2974. args = tuple(keys) + tuple(args)
  2975. # make sure the Redis server knows about the script
  2976. if isinstance(client, Pipeline):
  2977. # Make sure the pipeline can register the script before executing.
  2978. client.scripts.add(self)
  2979. try:
  2980. return client.evalsha(self.sha, len(keys), *args)
  2981. except NoScriptError:
  2982. # Maybe the client is pointed to a differnet server than the client
  2983. # that created this instance?
  2984. # Overwrite the sha just in case there was a discrepancy.
  2985. self.sha = client.script_load(self.script)
  2986. return client.evalsha(self.sha, len(keys), *args)
  2987. class BitFieldOperation(object):
  2988. """
  2989. Command builder for BITFIELD commands.
  2990. """
  2991. def __init__(self, client, key, default_overflow=None):
  2992. self.client = client
  2993. self.key = key
  2994. self._default_overflow = default_overflow
  2995. self.reset()
  2996. def reset(self):
  2997. """
  2998. Reset the state of the instance to when it was constructed
  2999. """
  3000. self.operations = []
  3001. self._last_overflow = 'WRAP'
  3002. self.overflow(self._default_overflow or self._last_overflow)
  3003. def overflow(self, overflow):
  3004. """
  3005. Update the overflow algorithm of successive INCRBY operations
  3006. :param overflow: Overflow algorithm, one of WRAP, SAT, FAIL. See the
  3007. Redis docs for descriptions of these algorithmsself.
  3008. :returns: a :py:class:`BitFieldOperation` instance.
  3009. """
  3010. overflow = overflow.upper()
  3011. if overflow != self._last_overflow:
  3012. self._last_overflow = overflow
  3013. self.operations.append(('OVERFLOW', overflow))
  3014. return self
  3015. def incrby(self, fmt, offset, increment, overflow=None):
  3016. """
  3017. Increment a bitfield by a given amount.
  3018. :param fmt: format-string for the bitfield being updated, e.g. 'u8'
  3019. for an unsigned 8-bit integer.
  3020. :param offset: offset (in number of bits). If prefixed with a
  3021. '#', this is an offset multiplier, e.g. given the arguments
  3022. fmt='u8', offset='#2', the offset will be 16.
  3023. :param int increment: value to increment the bitfield by.
  3024. :param str overflow: overflow algorithm. Defaults to WRAP, but other
  3025. acceptable values are SAT and FAIL. See the Redis docs for
  3026. descriptions of these algorithms.
  3027. :returns: a :py:class:`BitFieldOperation` instance.
  3028. """
  3029. if overflow is not None:
  3030. self.overflow(overflow)
  3031. self.operations.append(('INCRBY', fmt, offset, increment))
  3032. return self
  3033. def get(self, fmt, offset):
  3034. """
  3035. Get the value of a given bitfield.
  3036. :param fmt: format-string for the bitfield being read, e.g. 'u8' for
  3037. an unsigned 8-bit integer.
  3038. :param offset: offset (in number of bits). If prefixed with a
  3039. '#', this is an offset multiplier, e.g. given the arguments
  3040. fmt='u8', offset='#2', the offset will be 16.
  3041. :returns: a :py:class:`BitFieldOperation` instance.
  3042. """
  3043. self.operations.append(('GET', fmt, offset))
  3044. return self
  3045. def set(self, fmt, offset, value):
  3046. """
  3047. Set the value of a given bitfield.
  3048. :param fmt: format-string for the bitfield being read, e.g. 'u8' for
  3049. an unsigned 8-bit integer.
  3050. :param offset: offset (in number of bits). If prefixed with a
  3051. '#', this is an offset multiplier, e.g. given the arguments
  3052. fmt='u8', offset='#2', the offset will be 16.
  3053. :param int value: value to set at the given position.
  3054. :returns: a :py:class:`BitFieldOperation` instance.
  3055. """
  3056. self.operations.append(('SET', fmt, offset, value))
  3057. return self
  3058. @property
  3059. def command(self):
  3060. cmd = ['BITFIELD', self.key]
  3061. for ops in self.operations:
  3062. cmd.extend(ops)
  3063. return cmd
  3064. def execute(self):
  3065. """
  3066. Execute the operation(s) in a single BITFIELD command. The return value
  3067. is a list of values corresponding to each operation. If the client
  3068. used to create this instance was a pipeline, the list of values
  3069. will be present within the pipeline's execute.
  3070. """
  3071. command = self.command
  3072. self.reset()
  3073. return self.client.execute_command(*command)