diff --git a/python/ovs/json.py b/python/ovs/json.py
index 1e26a6290..d329ee410 100644
--- a/python/ovs/json.py
+++ b/python/ovs/json.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2010 Nicira Networks
+# Copyright (c) 2010, 2011, 2012 Nicira, Inc.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -16,6 +16,8 @@ import re
 import StringIO
 import sys
 
+__pychecker__ = 'no-stringiter'
+
 escapes = {ord('"'): u"\\\"",
            ord("\\"): u"\\\\",
            ord("\b"): u"\\b",
@@ -23,62 +25,87 @@ escapes = {ord('"'): u"\\\"",
            ord("\n"): u"\\n",
            ord("\r"): u"\\r",
            ord("\t"): u"\\t"}
-for i in range(32):
-    if i not in escapes:
-        escapes[i] = u"\\u%04x" % i
-
-def __dump_string(stream, s):
-    stream.write(u"\"")
-    for c in s:
-        x = ord(c)
-        escape = escapes.get(x)
-        if escape:
-            stream.write(escape)
+for esc in range(32):
+    if esc not in escapes:
+        escapes[esc] = u"\\u%04x" % esc
+
+SPACES_PER_LEVEL = 2
+
+
+class _Serializer(object):
+    def __init__(self, stream, pretty, sort_keys):
+        self.stream = stream
+        self.pretty = pretty
+        self.sort_keys = sort_keys
+        self.depth = 0
+
+    def __serialize_string(self, s):
+        self.stream.write(u'"%s"' % ''.join(escapes.get(ord(c), c) for c in s))
+
+    def __indent_line(self):
+        if self.pretty:
+            self.stream.write('\n')
+            self.stream.write(' ' * (SPACES_PER_LEVEL * self.depth))
+
+    def serialize(self, obj):
+        if obj is None:
+            self.stream.write(u"null")
+        elif obj is False:
+            self.stream.write(u"false")
+        elif obj is True:
+            self.stream.write(u"true")
+        elif type(obj) in (int, long):
+            self.stream.write(u"%d" % obj)
+        elif type(obj) == float:
+            self.stream.write("%.15g" % obj)
+        elif type(obj) == unicode:
+            self.__serialize_string(obj)
+        elif type(obj) == str:
+            self.__serialize_string(unicode(obj))
+        elif type(obj) == dict:
+            self.stream.write(u"{")
+
+            self.depth += 1
+            self.__indent_line()
+
+            if self.sort_keys:
+                items = sorted(obj.items())
+            else:
+                items = obj.iteritems()
+            for i, (key, value) in enumerate(items):
+                if i > 0:
+                    self.stream.write(u",")
+                    self.__indent_line()
+                self.__serialize_string(unicode(key))
+                self.stream.write(u":")
+                if self.pretty:
+                    self.stream.write(u' ')
+                self.serialize(value)
+
+            self.stream.write(u"}")
+            self.depth -= 1
+        elif type(obj) in (list, tuple):
+            self.stream.write(u"[")
+            self.depth += 1
+
+            if obj:
+                self.__indent_line()
+
+            for i, value in enumerate(obj):
+                if i > 0:
+                    self.stream.write(u",")
+                    self.__indent_line()
+                self.serialize(value)
+
+            self.depth -= 1
+            self.stream.write(u"]")
         else:
-            stream.write(c)
-    stream.write(u"\"")
+            raise Exception("can't serialize %s as JSON" % obj)
+
 
 def to_stream(obj, stream, pretty=False, sort_keys=True):
-    if obj is None:
-        stream.write(u"null")
-    elif obj is False:
-        stream.write(u"false")
-    elif obj is True:
-        stream.write(u"true")
-    elif type(obj) in (int, long):
-        stream.write(u"%d" % obj)
-    elif type(obj) == float:
-        stream.write("%.15g" % obj)
-    elif type(obj) == unicode:
-        __dump_string(stream, obj)
-    elif type(obj) == str:
-        __dump_string(stream, unicode(obj))
-    elif type(obj) == dict:
-        stream.write(u"{")
-        if sort_keys:
-            items = sorted(obj.items())
-        else:
-            items = obj.iteritems()
-        i = 0
-        for key, value in items:
-            if i > 0:
-                stream.write(u",")
-            i += 1
-            __dump_string(stream, unicode(key))
-            stream.write(u":")
-            to_stream(value, stream, pretty, sort_keys)
-        stream.write(u"}")
-    elif type(obj) in (list, tuple):
-        stream.write(u"[")
-        i = 0
-        for value in obj:
-            if i > 0:
-                stream.write(u",")
-            i += 1
-            to_stream(value, stream, pretty, sort_keys)
-        stream.write(u"]")
-    else:
-        raise Error("can't serialize %s as JSON" % obj)
+    _Serializer(stream, pretty, sort_keys).serialize(obj)
+
 
 def to_file(obj, name, pretty=False, sort_keys=True):
     stream = open(name, "w")
@@ -87,6 +114,7 @@ def to_file(obj, name, pretty=False, sort_keys=True):
     finally:
         stream.close()
 
+
 def to_string(obj, pretty=False, sort_keys=True):
     output = StringIO.StringIO()
     to_stream(obj, output, pretty, sort_keys)
@@ -94,6 +122,7 @@ def to_string(obj, pretty=False, sort_keys=True):
     output.close()
     return s
 
+
 def from_stream(stream):
     p = Parser(check_trailer=True)
     while True:
@@ -102,6 +131,7 @@ def from_stream(stream):
             break
     return p.finish()
 
+
 def from_file(name):
     stream = open(name, "r")
     try:
@@ -109,18 +139,19 @@ def from_file(name):
     finally:
         stream.close()
 
+
 def from_string(s):
     try:
         s = unicode(s, 'utf-8')
     except UnicodeDecodeError, e:
-        seq = ' '.join(["0x%2x" % ord(c) for c in e.object[e.start:e.end]])
-        raise Error("\"%s\" is not a valid UTF-8 string: "
-                    "invalid UTF-8 sequence %s" % (s, seq),
-                    tag="constraint violation")
+        seq = ' '.join(["0x%2x" % ord(c)
+                        for c in e.object[e.start:e.end] if ord(c) >= 0x80])
+        return ("not a valid UTF-8 string: invalid UTF-8 sequence %s" % seq)
    p = Parser(check_trailer=True)
    p.feed(s)
    return p.finish()
 
+
 class Parser(object):
     ## Maximum height of parsing stack. ##
     MAX_HEIGHT = 1000
@@ -134,7 +165,7 @@ class Parser(object):
         self.line_number = 0
         self.column_number = 0
         self.byte_number = 0
-    
+
         # Parsing.
         self.parse_state = Parser.__parse_start
         self.stack = []
@@ -146,16 +177,21 @@
 
     def __lex_start_space(self, c):
         pass
+
     def __lex_start_alpha(self, c):
         self.buffer = c
         self.lex_state = Parser.__lex_keyword
+
     def __lex_start_token(self, c):
         self.__parser_input(c)
+
     def __lex_start_number(self, c):
         self.buffer = c
         self.lex_state = Parser.__lex_number
-    def __lex_start_string(self, c):
+
+    def __lex_start_string(self, _):
         self.lex_state = Parser.__lex_string
+
     def __lex_start_error(self, c):
         if ord(c) >= 32 and ord(c) < 128:
             self.__error("invalid character '%s'" % c)
@@ -172,6 +208,7 @@
     for c in "-0123456789":
         __lex_start_actions[c] = __lex_start_number
     __lex_start_actions['"'] = __lex_start_string
+
     def __lex_start(self, c):
         Parser.__lex_start_actions.get(
             c, Parser.__lex_start_error)(self, c)
@@ -180,6 +217,7 @@
     __lex_alpha = {}
     for c in "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ":
         __lex_alpha[c] = True
+
     def __lex_finish_keyword(self):
         if self.buffer == "false":
             self.__parser_input(False)
@@ -189,6 +227,7 @@
             self.__parser_input(None)
         else:
             self.__error("invalid keyword '%s'" % self.buffer)
+
     def __lex_keyword(self, c):
         if c in Parser.__lex_alpha:
             self.buffer += c
@@ -197,12 +236,14 @@
             self.__lex_finish_keyword()
             return False
 
-    __number_re = re.compile("(-)?(0|[1-9][0-9]*)(?:\.([0-9]+))?(?:[eE]([-+]?[0-9]+))?$")
+    __number_re = re.compile("(-)?(0|[1-9][0-9]*)"
+                             "(?:\.([0-9]+))?(?:[eE]([-+]?[0-9]+))?$")
+
     def __lex_finish_number(self):
         s = self.buffer
         m = Parser.__number_re.match(s)
         if m:
-            sign, integer, fraction, exp = m.groups() 
+            sign, integer, fraction, exp = m.groups()
             if (exp is not None and
                 (long(exp) > sys.maxint or long(exp) < -sys.maxint - 1)):
                 self.__error("exponent outside valid range")
@@ -225,16 +266,16 @@
             if significand == 0:
                 self.__parser_input(0)
                 return
-            elif significand <= 2**63:
-                while pow10 > 0 and significand <= 2*63:
+            elif significand <= 2 ** 63:
+                while pow10 > 0 and significand <= 2 ** 63:
                     significand *= 10
                     pow10 -= 1
                 while pow10 < 0 and significand % 10 == 0:
                     significand /= 10
                     pow10 += 1
                 if (pow10 == 0 and
-                    ((not sign and significand < 2**63) or
-                     (sign and significand <= 2**63))):
+                    ((not sign and significand < 2 ** 63) or
+                     (sign and significand <= 2 ** 63))):
                     if sign:
                         self.__parser_input(-significand)
                     else:
@@ -259,7 +300,7 @@ class Parser(object):
                 self.__error("exponent must contain at least one digit")
         else:
             self.__error("syntax error in number")
-        
+
     def __lex_number(self, c):
         if c in ".0123456789eE-+":
             self.buffer += c
@@ -269,6 +310,7 @@
             return False
 
     __4hex_re = re.compile("[0-9a-fA-F]{4}")
+
     def __lex_4hex(self, s):
         if len(s) < 4:
             self.__error("quoted string ends within \\u escape")
@@ -278,16 +320,19 @@
             self.__error("null bytes not supported in quoted strings")
         else:
             return int(s, 16)
+
     @staticmethod
     def __is_leading_surrogate(c):
         """Returns true if 'c' is a Unicode code point for a leading
         surrogate."""
         return c >= 0xd800 and c <= 0xdbff
+
     @staticmethod
     def __is_trailing_surrogate(c):
         """Returns true if 'c' is a Unicode code point for a trailing
         surrogate."""
         return c >= 0xdc00 and c <= 0xdfff
+
     @staticmethod
     def __utf16_decode_surrogate_pair(leading, trailing):
         """Returns the unicode code point corresponding to leading surrogate
@@ -310,6 +355,7 @@
                  "n": u"\n",
                  "r": u"\r",
                  "t": u"\t"}
+
     def __lex_finish_string(self):
         inp = self.buffer
         out = u""
@@ -332,7 +378,7 @@ class Parser(object):
             elif inp[0] != u'u':
                 self.__error("bad escape \\%s" % inp[0])
                 return
-            
+
             c0 = self.__lex_4hex(inp[1:5])
             if c0 is None:
                 return
@@ -360,6 +406,7 @@ class Parser(object):
         self.buffer += c
         self.lex_state = Parser.__lex_string
         return True
+
     def __lex_string(self, c):
         if c == '\\':
             self.buffer += c
@@ -373,72 +420,72 @@ class Parser(object):
         return True
 
     def __lex_input(self, c):
-        self.byte_number += 1
-        if c == '\n':
-            self.column_number = 0
-            self.line_number += 1
-        else:
-            self.column_number += 1
-
         eat = self.lex_state(self, c)
         assert eat is True or eat is False
         return eat
 
-    def __parse_start(self, token, string):
+    def __parse_start(self, token, unused_string):
         if token == '{':
             self.__push_object()
         elif token == '[':
             self.__push_array()
         else:
             self.__error("syntax error at beginning of input")
-    def __parse_end(self, token, string):
+
+    def __parse_end(self, unused_token, unused_string):
         self.__error("trailing garbage at end of input")
+
     def __parse_object_init(self, token, string):
         if token == '}':
             self.__parser_pop()
         else:
             self.__parse_object_name(token, string)
+
     def __parse_object_name(self, token, string):
         if token == 'string':
             self.member_name = string
             self.parse_state = Parser.__parse_object_colon
         else:
             self.__error("syntax error parsing object expecting string")
-    def __parse_object_colon(self, token, string):
+
+    def __parse_object_colon(self, token, unused_string):
         if token == ":":
             self.parse_state = Parser.__parse_object_value
         else:
             self.__error("syntax error parsing object expecting ':'")
+
     def __parse_object_value(self, token, string):
         self.__parse_value(token, string, Parser.__parse_object_next)
-    def __parse_object_next(self, token, string):
+
+    def __parse_object_next(self, token, unused_string):
         if token == ",":
             self.parse_state = Parser.__parse_object_name
         elif token == "}":
             self.__parser_pop()
         else:
             self.__error("syntax error expecting '}' or ','")
+
     def __parse_array_init(self, token, string):
         if token == ']':
             self.__parser_pop()
         else:
             self.__parse_array_value(token, string)
+
     def __parse_array_value(self, token, string):
         self.__parse_value(token, string, Parser.__parse_array_next)
-    def __parse_array_next(self, token, string):
+
+    def __parse_array_next(self, token, unused_string):
         if token == ",":
             self.parse_state = Parser.__parse_array_value
         elif token == "]":
             self.__parser_pop()
         else:
             self.__error("syntax error expecting ']' or ','")
+
     def __parser_input(self, token, string=None):
         self.lex_state = Parser.__lex_start
         self.buffer = ""
-        #old_state = self.parse_state
         self.parse_state(self, token, string)
-        #print ("token=%s string=%s old_state=%s new_state=%s"
-        #       % (token, string, old_state, self.parse_state))
 
     def __put_value(self, value):
         top = self.stack[-1]
@@ -456,8 +503,10 @@ class Parser(object):
         else:
             self.__error("input exceeds maximum nesting depth %d" %
                          Parser.MAX_HEIGHT)
+
     def __push_object(self):
         self.__parser_push({}, Parser.__parse_object_init)
+
     def __push_array(self):
         self.__parser_push([], Parser.__parse_array_init)
 
@@ -501,7 +550,16 @@ class Parser(object):
         while True:
             if self.done or i >= len(s):
                 return i
-            if self.__lex_input(s[i]):
+
+            c = s[i]
+            if self.__lex_input(c):
+                self.byte_number += 1
+                if c == '\n':
+                    self.column_number = 0
+                    self.line_number += 1
+                else:
+                    self.column_number += 1
+
                 i += 1
 
     def is_done(self):
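
For orientation only, not part of the patch itself: the refactor keeps the module's public entry points, so the new _Serializer is reached through the same to_stream()/to_string() calls as before. A rough usage sketch follows; the sample object is made up, while the pretty and sort_keys flags and the two-space SPACES_PER_LEVEL indent come from the code above.

# Illustrative sketch only: exercises to_string() from the patched module;
# the sample dict is hypothetical.
import ovs.json

obj = {"name": "br0", "ports": ["eth0", "eth1"], "stp": False}

print ovs.json.to_string(obj)               # compact output, keys sorted by default
print ovs.json.to_string(obj, pretty=True)  # one member per line, 2 spaces per nesting level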