Devel
17 Feb '11
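
Judging from the matching insertion and deletion counts in the diffstat and from the hunks below, this patch is a whitespace-only reindentation of docs/apibuild.py; no functional change is visible in the shown hunks. As a minimal sketch of how such a reindentation could be scripted (Python 3; the tabs-to-spaces direction, the 8-column tab stop, and the in-place rewrite are assumptions for illustration, not something stated in the patch itself):

#!/usr/bin/env python3
# Sketch: expand leading tab indentation to spaces in a Python source file.
# Assumptions (not taken from the patch): 8-column tab stops and an in-place
# rewrite of docs/apibuild.py; adjust both as needed.
import sys

TAB_WIDTH = 8  # assumed tab stop

def reindent(path, tab_width=TAB_WIDTH):
    with open(path, encoding="utf-8") as f:
        lines = f.readlines()
    out = []
    for line in lines:
        # Separate the leading whitespace from the rest of the line and expand
        # only the indentation, so tabs embedded in string literals are kept.
        stripped = line.lstrip(" \t")
        indent = line[: len(line) - len(stripped)]
        out.append(indent.expandtabs(tab_width) + stripped)
    with open(path, "w", encoding="utf-8") as f:
        f.writelines(out)

if __name__ == "__main__":
    reindent(sys.argv[1] if len(sys.argv) > 1 else "docs/apibuild.py")
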
---
docs/apibuild.py | 3194 +++++++++++++++++++++++++++---------------------------
1 files changed, 1597 insertions(+), 1597 deletions(-)
diff --git a/docs/apibuild.py b/docs/apibuild.py
index 895a313..506932d 100755
--- a/docs/apibuild.py
+++ b/docs/apibuild.py
@@ -72,34 +72,34 @@ class identifier:
def __init__(self, name, header=None, module=None, type=None, lineno = 0,
info=None, extra=None, conditionals = None):
self.name = name
- self.header = header
- self.module = module
- self.type = type
- self.info = info
- self.extra = extra
- self.lineno = lineno
- self.static = 0
- if conditionals == None or len(conditionals) == 0:
- self.conditionals = None
- else:
- self.conditionals = conditionals[:]
- if self.name == debugsym:
- print "=> define %s : %s" % (debugsym, (module, type, info,
- extra, conditionals))
+ self.header = header
+ self.module = module
+ self.type = type
+ self.info = info
+ self.extra = extra
+ self.lineno = lineno
+ self.static = 0
+ if conditionals == None or len(conditionals) == 0:
+ self.conditionals = None
+ else:
+ self.conditionals = conditionals[:]
+ if self.name == debugsym:
+ print "=> define %s : %s" % (debugsym, (module, type, info,
+ extra, conditionals))
def __repr__(self):
r = "%s %s:" % (self.type, self.name)
- if self.static:
- r = r + " static"
- if self.module != None:
- r = r + " from %s" % (self.module)
- if self.info != None:
- r = r + " " + `self.info`
- if self.extra != None:
- r = r + " " + `self.extra`
- if self.conditionals != None:
- r = r + " " + `self.conditionals`
- return r
+ if self.static:
+ r = r + " static"
+ if self.module != None:
+ r = r + " from %s" % (self.module)
+ if self.info != None:
+ r = r + " " + `self.info`
+ if self.extra != None:
+ r = r + " " + `self.extra`
+ if self.conditionals != None:
+ r = r + " " + `self.conditionals`
+ return r
def set_header(self, header):
@@ -117,10 +117,10 @@ class identifier:
def set_static(self, static):
self.static = static
def set_conditionals(self, conditionals):
- if conditionals == None or len(conditionals) == 0:
- self.conditionals = None
- else:
- self.conditionals = conditionals[:]
+ if conditionals == None or len(conditionals) == 0:
+ self.conditionals = None
+ else:
+ self.conditionals = conditionals[:]
def get_name(self):
return self.name
@@ -143,96 +143,96 @@ class identifier:
def update(self, header, module, type = None, info = None, extra=None,
conditionals=None):
- if self.name == debugsym:
- print "=> update %s : %s" % (debugsym, (module, type, info,
- extra, conditionals))
+ if self.name == debugsym:
+ print "=> update %s : %s" % (debugsym, (module, type, info,
+ extra, conditionals))
if header != None and self.header == None:
- self.set_header(module)
+ self.set_header(module)
if module != None and (self.module == None or self.header == self.module):
- self.set_module(module)
+ self.set_module(module)
if type != None and self.type == None:
- self.set_type(type)
+ self.set_type(type)
if info != None:
- self.set_info(info)
+ self.set_info(info)
if extra != None:
- self.set_extra(extra)
+ self.set_extra(extra)
if conditionals != None:
- self.set_conditionals(conditionals)
+ self.set_conditionals(conditionals)
class index:
def __init__(self, name = "noname"):
self.name = name
self.identifiers = {}
self.functions = {}
- self.variables = {}
- self.includes = {}
- self.structs = {}
- self.enums = {}
- self.typedefs = {}
- self.macros = {}
- self.references = {}
- self.info = {}
+ self.variables = {}
+ self.includes = {}
+ self.structs = {}
+ self.enums = {}
+ self.typedefs = {}
+ self.macros = {}
+ self.references = {}
+ self.info = {}
def add_ref(self, name, header, module, static, type, lineno, info=None, extra=None, conditionals = None):
if name[0:2] == '__':
- return None
+ return None
d = None
try:
- d = self.identifiers[name]
- d.update(header, module, type, lineno, info, extra, conditionals)
- except:
- d = identifier(name, header, module, type, lineno, info, extra, conditionals)
- self.identifiers[name] = d
+ d = self.identifiers[name]
+ d.update(header, module, type, lineno, info, extra, conditionals)
+ except:
+ d = identifier(name, header, module, type, lineno, info, extra, conditionals)
+ self.identifiers[name] = d
- if d != None and static == 1:
- d.set_static(1)
+ if d != None and static == 1:
+ d.set_static(1)
- if d != None and name != None and type != None:
- self.references[name] = d
+ if d != None and name != None and type != None:
+ self.references[name] = d
- if name == debugsym:
- print "New ref: %s" % (d)
+ if name == debugsym:
+ print "New ref: %s" % (d)
- return d
+ return d
def add(self, name, header, module, static, type, lineno, info=None, extra=None, conditionals = None):
if name[0:2] == '__':
- return None
+ return None
d = None
try:
- d = self.identifiers[name]
- d.update(header, module, type, lineno, info, extra, conditionals)
- except:
- d = identifier(name, header, module, type, lineno, info, extra, conditionals)
- self.identifiers[name] = d
-
- if d != None and static == 1:
- d.set_static(1)
-
- if d != None and name != None and type != None:
- if type == "function":
- self.functions[name] = d
- elif type == "functype":
- self.functions[name] = d
- elif type == "variable":
- self.variables[name] = d
- elif type == "include":
- self.includes[name] = d
- elif type == "struct":
- self.structs[name] = d
- elif type == "enum":
- self.enums[name] = d
- elif type == "typedef":
- self.typedefs[name] = d
- elif type == "macro":
- self.macros[name] = d
- else:
- print "Unable to register type ", type
-
- if name == debugsym:
- print "New symbol: %s" % (d)
-
- return d
+ d = self.identifiers[name]
+ d.update(header, module, type, lineno, info, extra, conditionals)
+ except:
+ d = identifier(name, header, module, type, lineno, info, extra, conditionals)
+ self.identifiers[name] = d
+
+ if d != None and static == 1:
+ d.set_static(1)
+
+ if d != None and name != None and type != None:
+ if type == "function":
+ self.functions[name] = d
+ elif type == "functype":
+ self.functions[name] = d
+ elif type == "variable":
+ self.variables[name] = d
+ elif type == "include":
+ self.includes[name] = d
+ elif type == "struct":
+ self.structs[name] = d
+ elif type == "enum":
+ self.enums[name] = d
+ elif type == "typedef":
+ self.typedefs[name] = d
+ elif type == "macro":
+ self.macros[name] = d
+ else:
+ print "Unable to register type ", type
+
+ if name == debugsym:
+ print "New symbol: %s" % (d)
+
+ return d
def merge(self, idx):
for id in idx.functions.keys():
@@ -240,41 +240,41 @@ class index:
# macro might be used to override functions or variables
# definitions
#
- if self.macros.has_key(id):
- del self.macros[id]
- if self.functions.has_key(id):
- print "function %s from %s redeclared in %s" % (
- id, self.functions[id].header, idx.functions[id].header)
- else:
- self.functions[id] = idx.functions[id]
- self.identifiers[id] = idx.functions[id]
+ if self.macros.has_key(id):
+ del self.macros[id]
+ if self.functions.has_key(id):
+ print "function %s from %s redeclared in %s" % (
+ id, self.functions[id].header, idx.functions[id].header)
+ else:
+ self.functions[id] = idx.functions[id]
+ self.identifiers[id] = idx.functions[id]
for id in idx.variables.keys():
#
# macro might be used to override functions or variables
# definitions
#
- if self.macros.has_key(id):
- del self.macros[id]
- if self.variables.has_key(id):
- print "variable %s from %s redeclared in %s" % (
- id, self.variables[id].header, idx.variables[id].header)
- else:
- self.variables[id] = idx.variables[id]
- self.identifiers[id] = idx.variables[id]
+ if self.macros.has_key(id):
+ del self.macros[id]
+ if self.variables.has_key(id):
+ print "variable %s from %s redeclared in %s" % (
+ id, self.variables[id].header, idx.variables[id].header)
+ else:
+ self.variables[id] = idx.variables[id]
+ self.identifiers[id] = idx.variables[id]
for id in idx.structs.keys():
- if self.structs.has_key(id):
- print "struct %s from %s redeclared in %s" % (
- id, self.structs[id].header, idx.structs[id].header)
- else:
- self.structs[id] = idx.structs[id]
- self.identifiers[id] = idx.structs[id]
+ if self.structs.has_key(id):
+ print "struct %s from %s redeclared in %s" % (
+ id, self.structs[id].header, idx.structs[id].header)
+ else:
+ self.structs[id] = idx.structs[id]
+ self.identifiers[id] = idx.structs[id]
for id in idx.typedefs.keys():
- if self.typedefs.has_key(id):
- print "typedef %s from %s redeclared in %s" % (
- id, self.typedefs[id].header, idx.typedefs[id].header)
- else:
- self.typedefs[id] = idx.typedefs[id]
- self.identifiers[id] = idx.typedefs[id]
+ if self.typedefs.has_key(id):
+ print "typedef %s from %s redeclared in %s" % (
+ id, self.typedefs[id].header, idx.typedefs[id].header)
+ else:
+ self.typedefs[id] = idx.typedefs[id]
+ self.identifiers[id] = idx.typedefs[id]
for id in idx.macros.keys():
#
# macro might be used to override functions or variables
@@ -286,88 +286,88 @@ class index:
continue
if self.enums.has_key(id):
continue
- if self.macros.has_key(id):
- print "macro %s from %s redeclared in %s" % (
- id, self.macros[id].header, idx.macros[id].header)
- else:
- self.macros[id] = idx.macros[id]
- self.identifiers[id] = idx.macros[id]
+ if self.macros.has_key(id):
+ print "macro %s from %s redeclared in %s" % (
+ id, self.macros[id].header, idx.macros[id].header)
+ else:
+ self.macros[id] = idx.macros[id]
+ self.identifiers[id] = idx.macros[id]
for id in idx.enums.keys():
- if self.enums.has_key(id):
- print "enum %s from %s redeclared in %s" % (
- id, self.enums[id].header, idx.enums[id].header)
- else:
- self.enums[id] = idx.enums[id]
- self.identifiers[id] = idx.enums[id]
+ if self.enums.has_key(id):
+ print "enum %s from %s redeclared in %s" % (
+ id, self.enums[id].header, idx.enums[id].header)
+ else:
+ self.enums[id] = idx.enums[id]
+ self.identifiers[id] = idx.enums[id]
def merge_public(self, idx):
for id in idx.functions.keys():
- if self.functions.has_key(id):
- # check that function condition agrees with header
- if idx.functions[id].conditionals != \
- self.functions[id].conditionals:
- print "Header condition differs from Function for %s:" \
- % id
- print " H: %s" % self.functions[id].conditionals
- print " C: %s" % idx.functions[id].conditionals
- up = idx.functions[id]
- self.functions[id].update(None, up.module, up.type, up.info, up.extra)
- # else:
- # print "Function %s from %s is not declared in headers" % (
- # id, idx.functions[id].module)
- # TODO: do the same for variables.
+ if self.functions.has_key(id):
+ # check that function condition agrees with header
+ if idx.functions[id].conditionals != \
+ self.functions[id].conditionals:
+ print "Header condition differs from Function for %s:" \
+ % id
+ print " H: %s" % self.functions[id].conditionals
+ print " C: %s" % idx.functions[id].conditionals
+ up = idx.functions[id]
+ self.functions[id].update(None, up.module, up.type, up.info, up.extra)
+ # else:
+ # print "Function %s from %s is not declared in headers" % (
+ # id, idx.functions[id].module)
+ # TODO: do the same for variables.
def analyze_dict(self, type, dict):
count = 0
- public = 0
+ public = 0
for name in dict.keys():
- id = dict[name]
- count = count + 1
- if id.static == 0:
- public = public + 1
+ id = dict[name]
+ count = count + 1
+ if id.static == 0:
+ public = public + 1
if count != public:
- print " %d %s , %d public" % (count, type, public)
- elif count != 0:
- print " %d public %s" % (count, type)
+ print " %d %s , %d public" % (count, type, public)
+ elif count != 0:
+ print " %d public %s" % (count, type)
def analyze(self):
- self.analyze_dict("functions", self.functions)
- self.analyze_dict("variables", self.variables)
- self.analyze_dict("structs", self.structs)
- self.analyze_dict("typedefs", self.typedefs)
- self.analyze_dict("macros", self.macros)
+ self.analyze_dict("functions", self.functions)
+ self.analyze_dict("variables", self.variables)
+ self.analyze_dict("structs", self.structs)
+ self.analyze_dict("typedefs", self.typedefs)
+ self.analyze_dict("macros", self.macros)
class CLexer:
"""A lexer for the C language, tokenize the input by reading and
analyzing it line by line"""
def __init__(self, input):
self.input = input
- self.tokens = []
- self.line = ""
- self.lineno = 0
+ self.tokens = []
+ self.line = ""
+ self.lineno = 0
def getline(self):
line = ''
- while line == '':
- line = self.input.readline()
- if not line:
- return None
- self.lineno = self.lineno + 1
- line = string.lstrip(line)
- line = string.rstrip(line)
- if line == '':
- continue
- while line[-1] == '\\':
- line = line[:-1]
- n = self.input.readline()
- self.lineno = self.lineno + 1
- n = string.lstrip(n)
- n = string.rstrip(n)
- if not n:
- break
- else:
- line = line + n
+ while line == '':
+ line = self.input.readline()
+ if not line:
+ return None
+ self.lineno = self.lineno + 1
+ line = string.lstrip(line)
+ line = string.rstrip(line)
+ if line == '':
+ continue
+ while line[-1] == '\\':
+ line = line[:-1]
+ n = self.input.readline()
+ self.lineno = self.lineno + 1
+ n = string.lstrip(n)
+ n = string.rstrip(n)
+ if not n:
+ break
+ else:
+ line = line + n
return line
def getlineno(self):
@@ -378,193 +378,193 @@ class CLexer:
def debug(self):
print "Last token: ", self.last
- print "Token queue: ", self.tokens
- print "Line %d end: " % (self.lineno), self.line
+ print "Token queue: ", self.tokens
+ print "Line %d end: " % (self.lineno), self.line
def token(self):
while self.tokens == []:
- if self.line == "":
- line = self.getline()
- else:
- line = self.line
- self.line = ""
- if line == None:
- return None
-
- if line[0] == '#':
- self.tokens = map((lambda x: ('preproc', x)),
- string.split(line))
- break;
- l = len(line)
- if line[0] == '"' or line[0] == "'":
- end = line[0]
- line = line[1:]
- found = 0
- tok = ""
- while found == 0:
- i = 0
- l = len(line)
- while i < l:
- if line[i] == end:
- self.line = line[i+1:]
- line = line[:i]
- l = i
- found = 1
- break
- if line[i] == '\\':
- i = i + 1
- i = i + 1
- tok = tok + line
- if found == 0:
- line = self.getline()
- if line == None:
- return None
- self.last = ('string', tok)
- return self.last
-
- if l >= 2 and line[0] == '/' and line[1] == '*':
- line = line[2:]
- found = 0
- tok = ""
- while found == 0:
- i = 0
- l = len(line)
- while i < l:
- if line[i] == '*' and i+1 < l and line[i+1] == '/':
- self.line = line[i+2:]
- line = line[:i-1]
- l = i
- found = 1
- break
- i = i + 1
- if tok != "":
- tok = tok + "\n"
- tok = tok + line
- if found == 0:
- line = self.getline()
- if line == None:
- return None
- self.last = ('comment', tok)
- return self.last
- if l >= 2 and line[0] == '/' and line[1] == '/':
- line = line[2:]
- self.last = ('comment', line)
- return self.last
- i = 0
- while i < l:
- if line[i] == '/' and i+1 < l and line[i+1] == '/':
- self.line = line[i:]
- line = line[:i]
- break
- if line[i] == '/' and i+1 < l and line[i+1] == '*':
- self.line = line[i:]
- line = line[:i]
- break
- if line[i] == '"' or line[i] == "'":
- self.line = line[i:]
- line = line[:i]
- break
- i = i + 1
- l = len(line)
- i = 0
- while i < l:
- if line[i] == ' ' or line[i] == '\t':
- i = i + 1
- continue
- o = ord(line[i])
- if (o >= 97 and o <= 122) or (o >= 65 and o <= 90) or \
- (o >= 48 and o <= 57):
- s = i
- while i < l:
- o = ord(line[i])
- if (o >= 97 and o <= 122) or (o >= 65 and o <= 90) or \
- (o >= 48 and o <= 57) or string.find(
- " \t(){}:;,+-*/%&!|[]=><", line[i]) == -1:
- i = i + 1
- else:
- break
- self.tokens.append(('name', line[s:i]))
- continue
- if string.find("(){}:;,[]", line[i]) != -1:
+ if self.line == "":
+ line = self.getline()
+ else:
+ line = self.line
+ self.line = ""
+ if line == None:
+ return None
+
+ if line[0] == '#':
+ self.tokens = map((lambda x: ('preproc', x)),
+ string.split(line))
+ break;
+ l = len(line)
+ if line[0] == '"' or line[0] == "'":
+ end = line[0]
+ line = line[1:]
+ found = 0
+ tok = ""
+ while found == 0:
+ i = 0
+ l = len(line)
+ while i < l:
+ if line[i] == end:
+ self.line = line[i+1:]
+ line = line[:i]
+ l = i
+ found = 1
+ break
+ if line[i] == '\\':
+ i = i + 1
+ i = i + 1
+ tok = tok + line
+ if found == 0:
+ line = self.getline()
+ if line == None:
+ return None
+ self.last = ('string', tok)
+ return self.last
+
+ if l >= 2 and line[0] == '/' and line[1] == '*':
+ line = line[2:]
+ found = 0
+ tok = ""
+ while found == 0:
+ i = 0
+ l = len(line)
+ while i < l:
+ if line[i] == '*' and i+1 < l and line[i+1] == '/':
+ self.line = line[i+2:]
+ line = line[:i-1]
+ l = i
+ found = 1
+ break
+ i = i + 1
+ if tok != "":
+ tok = tok + "\n"
+ tok = tok + line
+ if found == 0:
+ line = self.getline()
+ if line == None:
+ return None
+ self.last = ('comment', tok)
+ return self.last
+ if l >= 2 and line[0] == '/' and line[1] == '/':
+ line = line[2:]
+ self.last = ('comment', line)
+ return self.last
+ i = 0
+ while i < l:
+ if line[i] == '/' and i+1 < l and line[i+1] == '/':
+ self.line = line[i:]
+ line = line[:i]
+ break
+ if line[i] == '/' and i+1 < l and line[i+1] == '*':
+ self.line = line[i:]
+ line = line[:i]
+ break
+ if line[i] == '"' or line[i] == "'":
+ self.line = line[i:]
+ line = line[:i]
+ break
+ i = i + 1
+ l = len(line)
+ i = 0
+ while i < l:
+ if line[i] == ' ' or line[i] == '\t':
+ i = i + 1
+ continue
+ o = ord(line[i])
+ if (o >= 97 and o <= 122) or (o >= 65 and o <= 90) or \
+ (o >= 48 and o <= 57):
+ s = i
+ while i < l:
+ o = ord(line[i])
+ if (o >= 97 and o <= 122) or (o >= 65 and o <= 90) or \
+ (o >= 48 and o <= 57) or string.find(
+ " \t(){}:;,+-*/%&!|[]=><", line[i]) == -1:
+ i = i + 1
+ else:
+ break
+ self.tokens.append(('name', line[s:i]))
+ continue
+ if string.find("(){}:;,[]", line[i]) != -1:
# if line[i] == '(' or line[i] == ')' or line[i] == '{' or \
-# line[i] == '}' or line[i] == ':' or line[i] == ';' or \
-# line[i] == ',' or line[i] == '[' or line[i] == ']':
- self.tokens.append(('sep', line[i]))
- i = i + 1
- continue
- if string.find("+-*><=/%&!|.", line[i]) != -1:
+# line[i] == '}' or line[i] == ':' or line[i] == ';' or \
+# line[i] == ',' or line[i] == '[' or line[i] == ']':
+ self.tokens.append(('sep', line[i]))
+ i = i + 1
+ continue
+ if string.find("+-*><=/%&!|.", line[i]) != -1:
# if line[i] == '+' or line[i] == '-' or line[i] == '*' or \
-# line[i] == '>' or line[i] == '<' or line[i] == '=' or \
-# line[i] == '/' or line[i] == '%' or line[i] == '&' or \
-# line[i] == '!' or line[i] == '|' or line[i] == '.':
- if line[i] == '.' and i + 2 < l and \
- line[i+1] == '.' and line[i+2] == '.':
- self.tokens.append(('name', '...'))
- i = i + 3
- continue
-
- j = i + 1
- if j < l and (
- string.find("+-*><=/%&!|", line[j]) != -1):
-# line[j] == '+' or line[j] == '-' or line[j] == '*' or \
-# line[j] == '>' or line[j] == '<' or line[j] == '=' or \
-# line[j] == '/' or line[j] == '%' or line[j] == '&' or \
-# line[j] == '!' or line[j] == '|'):
- self.tokens.append(('op', line[i:j+1]))
- i = j + 1
- else:
- self.tokens.append(('op', line[i]))
- i = i + 1
- continue
- s = i
- while i < l:
- o = ord(line[i])
- if (o >= 97 and o <= 122) or (o >= 65 and o <= 90) or \
- (o >= 48 and o <= 57) or (
- string.find(" \t(){}:;,+-*/%&!|[]=><", line[i]) == -1):
-# line[i] != ' ' and line[i] != '\t' and
-# line[i] != '(' and line[i] != ')' and
-# line[i] != '{' and line[i] != '}' and
-# line[i] != ':' and line[i] != ';' and
-# line[i] != ',' and line[i] != '+' and
-# line[i] != '-' and line[i] != '*' and
-# line[i] != '/' and line[i] != '%' and
-# line[i] != '&' and line[i] != '!' and
-# line[i] != '|' and line[i] != '[' and
-# line[i] != ']' and line[i] != '=' and
-# line[i] != '*' and line[i] != '>' and
-# line[i] != '<'):
- i = i + 1
- else:
- break
- self.tokens.append(('name', line[s:i]))
-
- tok = self.tokens[0]
- self.tokens = self.tokens[1:]
- self.last = tok
- return tok
+# line[i] == '>' or line[i] == '<' or line[i] == '=' or \
+# line[i] == '/' or line[i] == '%' or line[i] == '&' or \
+# line[i] == '!' or line[i] == '|' or line[i] == '.':
+ if line[i] == '.' and i + 2 < l and \
+ line[i+1] == '.' and line[i+2] == '.':
+ self.tokens.append(('name', '...'))
+ i = i + 3
+ continue
+
+ j = i + 1
+ if j < l and (
+ string.find("+-*><=/%&!|", line[j]) != -1):
+# line[j] == '+' or line[j] == '-' or line[j] == '*' or \
+# line[j] == '>' or line[j] == '<' or line[j] == '=' or \
+# line[j] == '/' or line[j] == '%' or line[j] == '&' or \
+# line[j] == '!' or line[j] == '|'):
+ self.tokens.append(('op', line[i:j+1]))
+ i = j + 1
+ else:
+ self.tokens.append(('op', line[i]))
+ i = i + 1
+ continue
+ s = i
+ while i < l:
+ o = ord(line[i])
+ if (o >= 97 and o <= 122) or (o >= 65 and o <= 90) or \
+ (o >= 48 and o <= 57) or (
+ string.find(" \t(){}:;,+-*/%&!|[]=><", line[i]) == -1):
+# line[i] != ' ' and line[i] != '\t' and
+# line[i] != '(' and line[i] != ')' and
+# line[i] != '{' and line[i] != '}' and
+# line[i] != ':' and line[i] != ';' and
+# line[i] != ',' and line[i] != '+' and
+# line[i] != '-' and line[i] != '*' and
+# line[i] != '/' and line[i] != '%' and
+# line[i] != '&' and line[i] != '!' and
+# line[i] != '|' and line[i] != '[' and
+# line[i] != ']' and line[i] != '=' and
+# line[i] != '*' and line[i] != '>' and
+# line[i] != '<'):
+ i = i + 1
+ else:
+ break
+ self.tokens.append(('name', line[s:i]))
+
+ tok = self.tokens[0]
+ self.tokens = self.tokens[1:]
+ self.last = tok
+ return tok
class CParser:
"""The C module parser"""
def __init__(self, filename, idx = None):
self.filename = filename
- if len(filename) > 2 and filename[-2:] == '.h':
- self.is_header = 1
- else:
- self.is_header = 0
+ if len(filename) > 2 and filename[-2:] == '.h':
+ self.is_header = 1
+ else:
+ self.is_header = 0
self.input = open(filename)
- self.lexer = CLexer(self.input)
- if idx == None:
- self.index = index()
- else:
- self.index = idx
- self.top_comment = ""
- self.last_comment = ""
- self.comment = None
- self.collect_ref = 0
- self.no_error = 0
- self.conditionals = []
- self.defines = []
+ self.lexer = CLexer(self.input)
+ if idx == None:
+ self.index = index()
+ else:
+ self.index = idx
+ self.top_comment = ""
+ self.last_comment = ""
+ self.comment = None
+ self.collect_ref = 0
+ self.no_error = 0
+ self.conditionals = []
+ self.defines = []
def collect_references(self):
self.collect_ref = 1
@@ -579,203 +579,203 @@ class CParser:
return self.lexer.getlineno()
def index_add(self, name, module, static, type, info=None, extra = None):
- if self.is_header == 1:
- self.index.add(name, module, module, static, type, self.lineno(),
- info, extra, self.conditionals)
- else:
- self.index.add(name, None, module, static, type, self.lineno(),
- info, extra, self.conditionals)
+ if self.is_header == 1:
+ self.index.add(name, module, module, static, type, self.lineno(),
+ info, extra, self.conditionals)
+ else:
+ self.index.add(name, None, module, static, type, self.lineno(),
+ info, extra, self.conditionals)
def index_add_ref(self, name, module, static, type, info=None,
extra = None):
- if self.is_header == 1:
- self.index.add_ref(name, module, module, static, type,
- self.lineno(), info, extra, self.conditionals)
- else:
- self.index.add_ref(name, None, module, static, type, self.lineno(),
- info, extra, self.conditionals)
+ if self.is_header == 1:
+ self.index.add_ref(name, module, module, static, type,
+ self.lineno(), info, extra, self.conditionals)
+ else:
+ self.index.add_ref(name, None, module, static, type, self.lineno(),
+ info, extra, self.conditionals)
def warning(self, msg):
if self.no_error:
- return
- print msg
+ return
+ print msg
def error(self, msg, token=-1):
if self.no_error:
- return
+ return
print "Parse Error: " + msg
- if token != -1:
- print "Got token ", token
- self.lexer.debug()
- sys.exit(1)
+ if token != -1:
+ print "Got token ", token
+ self.lexer.debug()
+ sys.exit(1)
def debug(self, msg, token=-1):
print "Debug: " + msg
- if token != -1:
- print "Got token ", token
- self.lexer.debug()
+ if token != -1:
+ print "Got token ", token
+ self.lexer.debug()
def parseTopComment(self, comment):
- res = {}
- lines = string.split(comment, "\n")
- item = None
- for line in lines:
- while line != "" and (line[0] == ' ' or line[0] == '\t'):
- line = line[1:]
- while line != "" and line[0] == '*':
- line = line[1:]
- while line != "" and (line[0] == ' ' or line[0] == '\t'):
- line = line[1:]
- try:
- (it, line) = string.split(line, ":", 1)
- item = it
- while line != "" and (line[0] == ' ' or line[0] == '\t'):
- line = line[1:]
- if res.has_key(item):
- res[item] = res[item] + " " + line
- else:
- res[item] = line
- except:
- if item != None:
- if res.has_key(item):
- res[item] = res[item] + " " + line
- else:
- res[item] = line
- self.index.info = res
+ res = {}
+ lines = string.split(comment, "\n")
+ item = None
+ for line in lines:
+ while line != "" and (line[0] == ' ' or line[0] == '\t'):
+ line = line[1:]
+ while line != "" and line[0] == '*':
+ line = line[1:]
+ while line != "" and (line[0] == ' ' or line[0] == '\t'):
+ line = line[1:]
+ try:
+ (it, line) = string.split(line, ":", 1)
+ item = it
+ while line != "" and (line[0] == ' ' or line[0] == '\t'):
+ line = line[1:]
+ if res.has_key(item):
+ res[item] = res[item] + " " + line
+ else:
+ res[item] = line
+ except:
+ if item != None:
+ if res.has_key(item):
+ res[item] = res[item] + " " + line
+ else:
+ res[item] = line
+ self.index.info = res
def parseComment(self, token):
if self.top_comment == "":
- self.top_comment = token[1]
- if self.comment == None or token[1][0] == '*':
- self.comment = token[1];
- else:
- self.comment = self.comment + token[1]
- token = self.lexer.token()
+ self.top_comment = token[1]
+ if self.comment == None or token[1][0] == '*':
+ self.comment = token[1];
+ else:
+ self.comment = self.comment + token[1]
+ token = self.lexer.token()
if string.find(self.comment, "DOC_DISABLE") != -1:
- self.stop_error()
+ self.stop_error()
if string.find(self.comment, "DOC_ENABLE") != -1:
- self.start_error()
+ self.start_error()
- return token
+ return token
#
# Parse a comment block associate to a typedef
#
def parseTypeComment(self, name, quiet = 0):
if name[0:2] == '__':
- quiet = 1
+ quiet = 1
args = []
- desc = ""
+ desc = ""
if self.comment == None:
- if not quiet:
- self.warning("Missing comment for type %s" % (name))
- return((args, desc))
+ if not quiet:
+ self.warning("Missing comment for type %s" % (name))
+ return((args, desc))
if self.comment[0] != '*':
- if not quiet:
- self.warning("Missing * in type comment for %s" % (name))
- return((args, desc))
- lines = string.split(self.comment, '\n')
- if lines[0] == '*':
- del lines[0]
- if lines[0] != "* %s:" % (name):
- if not quiet:
- self.warning("Misformatted type comment for %s" % (name))
- self.warning(" Expecting '* %s:' got '%s'" % (name, lines[0]))
- return((args, desc))
- del lines[0]
- while len(lines) > 0 and lines[0] == '*':
- del lines[0]
- desc = ""
- while len(lines) > 0:
- l = lines[0]
- while len(l) > 0 and l[0] == '*':
- l = l[1:]
- l = string.strip(l)
- desc = desc + " " + l
- del lines[0]
-
- desc = string.strip(desc)
-
- if quiet == 0:
- if desc == "":
- self.warning("Type comment for %s lack description of the macro" % (name))
-
- return(desc)
+ if not quiet:
+ self.warning("Missing * in type comment for %s" % (name))
+ return((args, desc))
+ lines = string.split(self.comment, '\n')
+ if lines[0] == '*':
+ del lines[0]
+ if lines[0] != "* %s:" % (name):
+ if not quiet:
+ self.warning("Misformatted type comment for %s" % (name))
+ self.warning(" Expecting '* %s:' got '%s'" % (name, lines[0]))
+ return((args, desc))
+ del lines[0]
+ while len(lines) > 0 and lines[0] == '*':
+ del lines[0]
+ desc = ""
+ while len(lines) > 0:
+ l = lines[0]
+ while len(l) > 0 and l[0] == '*':
+ l = l[1:]
+ l = string.strip(l)
+ desc = desc + " " + l
+ del lines[0]
+
+ desc = string.strip(desc)
+
+ if quiet == 0:
+ if desc == "":
+ self.warning("Type comment for %s lack description of the macro" % (name))
+
+ return(desc)
#
# Parse a comment block associate to a macro
#
def parseMacroComment(self, name, quiet = 0):
if name[0:2] == '__':
- quiet = 1
+ quiet = 1
args = []
- desc = ""
+ desc = ""
if self.comment == None:
- if not quiet:
- self.warning("Missing comment for macro %s" % (name))
- return((args, desc))
+ if not quiet:
+ self.warning("Missing comment for macro %s" % (name))
+ return((args, desc))
if self.comment[0] != '*':
- if not quiet:
- self.warning("Missing * in macro comment for %s" % (name))
- return((args, desc))
- lines = string.split(self.comment, '\n')
- if lines[0] == '*':
- del lines[0]
- if lines[0] != "* %s:" % (name):
- if not quiet:
- self.warning("Misformatted macro comment for %s" % (name))
- self.warning(" Expecting '* %s:' got '%s'" % (name, lines[0]))
- return((args, desc))
- del lines[0]
- while lines[0] == '*':
- del lines[0]
- while len(lines) > 0 and lines[0][0:3] == '* @':
- l = lines[0][3:]
- try:
- (arg, desc) = string.split(l, ':', 1)
- desc=string.strip(desc)
- arg=string.strip(arg)
+ if not quiet:
+ self.warning("Missing * in macro comment for %s" % (name))
+ return((args, desc))
+ lines = string.split(self.comment, '\n')
+ if lines[0] == '*':
+ del lines[0]
+ if lines[0] != "* %s:" % (name):
+ if not quiet:
+ self.warning("Misformatted macro comment for %s" % (name))
+ self.warning(" Expecting '* %s:' got '%s'" % (name, lines[0]))
+ return((args, desc))
+ del lines[0]
+ while lines[0] == '*':
+ del lines[0]
+ while len(lines) > 0 and lines[0][0:3] == '* @':
+ l = lines[0][3:]
+ try:
+ (arg, desc) = string.split(l, ':', 1)
+ desc=string.strip(desc)
+ arg=string.strip(arg)
except:
- if not quiet:
- self.warning("Misformatted macro comment for %s" % (name))
- self.warning(" problem with '%s'" % (lines[0]))
- del lines[0]
- continue
- del lines[0]
- l = string.strip(lines[0])
- while len(l) > 2 and l[0:3] != '* @':
- while l[0] == '*':
- l = l[1:]
- desc = desc + ' ' + string.strip(l)
- del lines[0]
- if len(lines) == 0:
- break
- l = lines[0]
+ if not quiet:
+ self.warning("Misformatted macro comment for %s" % (name))
+ self.warning(" problem with '%s'" % (lines[0]))
+ del lines[0]
+ continue
+ del lines[0]
+ l = string.strip(lines[0])
+ while len(l) > 2 and l[0:3] != '* @':
+ while l[0] == '*':
+ l = l[1:]
+ desc = desc + ' ' + string.strip(l)
+ del lines[0]
+ if len(lines) == 0:
+ break
+ l = lines[0]
args.append((arg, desc))
- while len(lines) > 0 and lines[0] == '*':
- del lines[0]
- desc = ""
- while len(lines) > 0:
- l = lines[0]
- while len(l) > 0 and l[0] == '*':
- l = l[1:]
- l = string.strip(l)
- desc = desc + " " + l
- del lines[0]
+ while len(lines) > 0 and lines[0] == '*':
+ del lines[0]
+ desc = ""
+ while len(lines) > 0:
+ l = lines[0]
+ while len(l) > 0 and l[0] == '*':
+ l = l[1:]
+ l = string.strip(l)
+ desc = desc + " " + l
+ del lines[0]
- desc = string.strip(desc)
+ desc = string.strip(desc)
- if quiet == 0:
- if desc == "":
- self.warning("Macro comment for %s lack description of the macro" % (name))
+ if quiet == 0:
+ if desc == "":
+ self.warning("Macro comment for %s lack description of the macro" % (name))
- return((args, desc))
+ return((args, desc))
#
# Parse a comment block and merge the information found in the
@@ -786,219 +786,219 @@ class CParser:
global ignored_functions
if name == 'main':
- quiet = 1
+ quiet = 1
if name[0:2] == '__':
- quiet = 1
+ quiet = 1
if ignored_functions.has_key(name):
quiet = 1
- (ret, args) = description
- desc = ""
- retdesc = ""
+ (ret, args) = description
+ desc = ""
+ retdesc = ""
if self.comment == None:
- if not quiet:
- self.warning("Missing comment for function %s" % (name))
- return(((ret[0], retdesc), args, desc))
+ if not quiet:
+ self.warning("Missing comment for function %s" % (name))
+ return(((ret[0], retdesc), args, desc))
if self.comment[0] != '*':
- if not quiet:
- self.warning("Missing * in function comment for %s" % (name))
- return(((ret[0], retdesc), args, desc))
- lines = string.split(self.comment, '\n')
- if lines[0] == '*':
- del lines[0]
- if lines[0] != "* %s:" % (name):
- if not quiet:
- self.warning("Misformatted function comment for %s" % (name))
- self.warning(" Expecting '* %s:' got '%s'" % (name, lines[0]))
- return(((ret[0], retdesc), args, desc))
- del lines[0]
- while lines[0] == '*':
- del lines[0]
- nbargs = len(args)
- while len(lines) > 0 and lines[0][0:3] == '* @':
- l = lines[0][3:]
- try:
- (arg, desc) = string.split(l, ':', 1)
- desc=string.strip(desc)
- arg=string.strip(arg)
+ if not quiet:
+ self.warning("Missing * in function comment for %s" % (name))
+ return(((ret[0], retdesc), args, desc))
+ lines = string.split(self.comment, '\n')
+ if lines[0] == '*':
+ del lines[0]
+ if lines[0] != "* %s:" % (name):
+ if not quiet:
+ self.warning("Misformatted function comment for %s" % (name))
+ self.warning(" Expecting '* %s:' got '%s'" % (name, lines[0]))
+ return(((ret[0], retdesc), args, desc))
+ del lines[0]
+ while lines[0] == '*':
+ del lines[0]
+ nbargs = len(args)
+ while len(lines) > 0 and lines[0][0:3] == '* @':
+ l = lines[0][3:]
+ try:
+ (arg, desc) = string.split(l, ':', 1)
+ desc=string.strip(desc)
+ arg=string.strip(arg)
except:
- if not quiet:
- self.warning("Misformatted function comment for %s" % (name))
- self.warning(" problem with '%s'" % (lines[0]))
- del lines[0]
- continue
- del lines[0]
- l = string.strip(lines[0])
- while len(l) > 2 and l[0:3] != '* @':
- while l[0] == '*':
- l = l[1:]
- desc = desc + ' ' + string.strip(l)
- del lines[0]
- if len(lines) == 0:
- break
- l = lines[0]
- i = 0
- while i < nbargs:
- if args[i][1] == arg:
- args[i] = (args[i][0], arg, desc)
- break;
- i = i + 1
- if i >= nbargs:
- if not quiet:
- self.warning("Unable to find arg %s from function comment for %s" % (
- arg, name))
- while len(lines) > 0 and lines[0] == '*':
- del lines[0]
- desc = None
- while len(lines) > 0:
- l = lines[0]
- i = 0
- # Remove all leading '*', followed by at most one ' ' character
- # since we need to preserve correct identation of code examples
- while i < len(l) and l[i] == '*':
- i = i + 1
- if i > 0:
- if i < len(l) and l[i] == ' ':
- i = i + 1
- l = l[i:]
- if len(l) >= 6 and l[0:7] == "returns" or l[0:7] == "Returns":
- try:
- l = string.split(l, ' ', 1)[1]
- except:
- l = ""
- retdesc = string.strip(l)
- del lines[0]
- while len(lines) > 0:
- l = lines[0]
- while len(l) > 0 and l[0] == '*':
- l = l[1:]
- l = string.strip(l)
- retdesc = retdesc + " " + l
- del lines[0]
- else:
- if desc is not None:
- desc = desc + "\n" + l
- else:
- desc = l
- del lines[0]
-
- if desc is None:
- desc = ""
- retdesc = string.strip(retdesc)
- desc = string.strip(desc)
-
- if quiet == 0:
- #
- # report missing comments
- #
- i = 0
- while i < nbargs:
- if args[i][2] == None and args[i][0] != "void" and args[i][1] != None:
- self.warning("Function comment for %s lacks description of arg %s" % (name, args[i][1]))
- i = i + 1
- if retdesc == "" and ret[0] != "void":
- self.warning("Function comment for %s lacks description of return value" % (name))
- if desc == "":
- self.warning("Function comment for %s lacks description of the function" % (name))
-
-
- return(((ret[0], retdesc), args, desc))
+ if not quiet:
+ self.warning("Misformatted function comment for %s" % (name))
+ self.warning(" problem with '%s'" % (lines[0]))
+ del lines[0]
+ continue
+ del lines[0]
+ l = string.strip(lines[0])
+ while len(l) > 2 and l[0:3] != '* @':
+ while l[0] == '*':
+ l = l[1:]
+ desc = desc + ' ' + string.strip(l)
+ del lines[0]
+ if len(lines) == 0:
+ break
+ l = lines[0]
+ i = 0
+ while i < nbargs:
+ if args[i][1] == arg:
+ args[i] = (args[i][0], arg, desc)
+ break;
+ i = i + 1
+ if i >= nbargs:
+ if not quiet:
+ self.warning("Unable to find arg %s from function comment for %s" % (
+ arg, name))
+ while len(lines) > 0 and lines[0] == '*':
+ del lines[0]
+ desc = None
+ while len(lines) > 0:
+ l = lines[0]
+ i = 0
+ # Remove all leading '*', followed by at most one ' ' character
+ # since we need to preserve correct identation of code examples
+ while i < len(l) and l[i] == '*':
+ i = i + 1
+ if i > 0:
+ if i < len(l) and l[i] == ' ':
+ i = i + 1
+ l = l[i:]
+ if len(l) >= 6 and l[0:7] == "returns" or l[0:7] == "Returns":
+ try:
+ l = string.split(l, ' ', 1)[1]
+ except:
+ l = ""
+ retdesc = string.strip(l)
+ del lines[0]
+ while len(lines) > 0:
+ l = lines[0]
+ while len(l) > 0 and l[0] == '*':
+ l = l[1:]
+ l = string.strip(l)
+ retdesc = retdesc + " " + l
+ del lines[0]
+ else:
+ if desc is not None:
+ desc = desc + "\n" + l
+ else:
+ desc = l
+ del lines[0]
+
+ if desc is None:
+ desc = ""
+ retdesc = string.strip(retdesc)
+ desc = string.strip(desc)
+
+ if quiet == 0:
+ #
+ # report missing comments
+ #
+ i = 0
+ while i < nbargs:
+ if args[i][2] == None and args[i][0] != "void" and args[i][1] != None:
+ self.warning("Function comment for %s lacks description of arg %s" % (name, args[i][1]))
+ i = i + 1
+ if retdesc == "" and ret[0] != "void":
+ self.warning("Function comment for %s lacks description of return value" % (name))
+ if desc == "":
+ self.warning("Function comment for %s lacks description of the function" % (name))
+
+
+ return(((ret[0], retdesc), args, desc))
def parsePreproc(self, token):
- if debug:
- print "=> preproc ", token, self.lexer.tokens
+ if debug:
+ print "=> preproc ", token, self.lexer.tokens
name = token[1]
- if name == "#include":
- token = self.lexer.token()
- if token == None:
- return None
- if token[0] == 'preproc':
- self.index_add(token[1], self.filename, not self.is_header,
- "include")
- return self.lexer.token()
- return token
- if name == "#define":
- token = self.lexer.token()
- if token == None:
- return None
- if token[0] == 'preproc':
- # TODO macros with arguments
- name = token[1]
- lst = []
- token = self.lexer.token()
- while token != None and token[0] == 'preproc' and \
- token[1][0] != '#':
- lst.append(token[1])
- token = self.lexer.token()
+ if name == "#include":
+ token = self.lexer.token()
+ if token == None:
+ return None
+ if token[0] == 'preproc':
+ self.index_add(token[1], self.filename, not self.is_header,
+ "include")
+ return self.lexer.token()
+ return token
+ if name == "#define":
+ token = self.lexer.token()
+ if token == None:
+ return None
+ if token[0] == 'preproc':
+ # TODO macros with arguments
+ name = token[1]
+ lst = []
+ token = self.lexer.token()
+ while token != None and token[0] == 'preproc' and \
+ token[1][0] != '#':
+ lst.append(token[1])
+ token = self.lexer.token()
try:
- name = string.split(name, '(') [0]
+ name = string.split(name, '(') [0]
except:
pass
info = self.parseMacroComment(name, not self.is_header)
- self.index_add(name, self.filename, not self.is_header,
- "macro", info)
- return token
-
- #
- # Processing of conditionals modified by Bill 1/1/05
- #
- # We process conditionals (i.e. tokens from #ifdef, #ifndef,
- # #if, #else and #endif) for headers and mainline code,
- # store the ones from the header in libxml2-api.xml, and later
- # (in the routine merge_public) verify that the two (header and
- # mainline code) agree.
- #
- # There is a small problem with processing the headers. Some of
- # the variables are not concerned with enabling / disabling of
- # library functions (e.g. '__XML_PARSER_H__'), and we don't want
- # them to be included in libxml2-api.xml, or involved in
- # the check between the header and the mainline code. To
- # accomplish this, we ignore any conditional which doesn't include
- # the string 'ENABLED'
- #
- if name == "#ifdef":
- apstr = self.lexer.tokens[0][1]
- try:
- self.defines.append(apstr)
- if string.find(apstr, 'ENABLED') != -1:
- self.conditionals.append("defined(%s)" % apstr)
- except:
- pass
- elif name == "#ifndef":
- apstr = self.lexer.tokens[0][1]
- try:
- self.defines.append(apstr)
- if string.find(apstr, 'ENABLED') != -1:
- self.conditionals.append("!defined(%s)" % apstr)
- except:
- pass
- elif name == "#if":
- apstr = ""
- for tok in self.lexer.tokens:
- if apstr != "":
- apstr = apstr + " "
- apstr = apstr + tok[1]
- try:
- self.defines.append(apstr)
- if string.find(apstr, 'ENABLED') != -1:
- self.conditionals.append(apstr)
- except:
- pass
- elif name == "#else":
- if self.conditionals != [] and \
- string.find(self.defines[-1], 'ENABLED') != -1:
- self.conditionals[-1] = "!(%s)" % self.conditionals[-1]
- elif name == "#endif":
- if self.conditionals != [] and \
- string.find(self.defines[-1], 'ENABLED') != -1:
- self.conditionals = self.conditionals[:-1]
- self.defines = self.defines[:-1]
- token = self.lexer.token()
- while token != None and token[0] == 'preproc' and \
- token[1][0] != '#':
- token = self.lexer.token()
- return token
+ self.index_add(name, self.filename, not self.is_header,
+ "macro", info)
+ return token
+
+ #
+ # Processing of conditionals modified by Bill 1/1/05
+ #
+ # We process conditionals (i.e. tokens from #ifdef, #ifndef,
+ # #if, #else and #endif) for headers and mainline code,
+ # store the ones from the header in libxml2-api.xml, and later
+ # (in the routine merge_public) verify that the two (header and
+ # mainline code) agree.
+ #
+ # There is a small problem with processing the headers. Some of
+ # the variables are not concerned with enabling / disabling of
+ # library functions (e.g. '__XML_PARSER_H__'), and we don't want
+ # them to be included in libxml2-api.xml, or involved in
+ # the check between the header and the mainline code. To
+ # accomplish this, we ignore any conditional which doesn't include
+ # the string 'ENABLED'
+ #
+ if name == "#ifdef":
+ apstr = self.lexer.tokens[0][1]
+ try:
+ self.defines.append(apstr)
+ if string.find(apstr, 'ENABLED') != -1:
+ self.conditionals.append("defined(%s)" % apstr)
+ except:
+ pass
+ elif name == "#ifndef":
+ apstr = self.lexer.tokens[0][1]
+ try:
+ self.defines.append(apstr)
+ if string.find(apstr, 'ENABLED') != -1:
+ self.conditionals.append("!defined(%s)" % apstr)
+ except:
+ pass
+ elif name == "#if":
+ apstr = ""
+ for tok in self.lexer.tokens:
+ if apstr != "":
+ apstr = apstr + " "
+ apstr = apstr + tok[1]
+ try:
+ self.defines.append(apstr)
+ if string.find(apstr, 'ENABLED') != -1:
+ self.conditionals.append(apstr)
+ except:
+ pass
+ elif name == "#else":
+ if self.conditionals != [] and \
+ string.find(self.defines[-1], 'ENABLED') != -1:
+ self.conditionals[-1] = "!(%s)" % self.conditionals[-1]
+ elif name == "#endif":
+ if self.conditionals != [] and \
+ string.find(self.defines[-1], 'ENABLED') != -1:
+ self.conditionals = self.conditionals[:-1]
+ self.defines = self.defines[:-1]
+ token = self.lexer.token()
+ while token != None and token[0] == 'preproc' and \
+ token[1][0] != '#':
+ token = self.lexer.token()
+ return token
#
# token acquisition on top of the lexer, it handle internally
@@ -1012,89 +1012,89 @@ class CParser:
global ignored_words
token = self.lexer.token()
- while token != None:
- if token[0] == 'comment':
- token = self.parseComment(token)
- continue
- elif token[0] == 'preproc':
- token = self.parsePreproc(token)
- continue
- elif token[0] == "name" and token[1] == "__const":
- token = ("name", "const")
- return token
- elif token[0] == "name" and token[1] == "__attribute":
- token = self.lexer.token()
- while token != None and token[1] != ";":
- token = self.lexer.token()
- return token
- elif token[0] == "name" and ignored_words.has_key(token[1]):
- (n, info) = ignored_words[token[1]]
- i = 0
- while i < n:
- token = self.lexer.token()
- i = i + 1
- token = self.lexer.token()
- continue
- else:
- if debug:
- print "=> ", token
- return token
- return None
+ while token != None:
+ if token[0] == 'comment':
+ token = self.parseComment(token)
+ continue
+ elif token[0] == 'preproc':
+ token = self.parsePreproc(token)
+ continue
+ elif token[0] == "name" and token[1] == "__const":
+ token = ("name", "const")
+ return token
+ elif token[0] == "name" and token[1] == "__attribute":
+ token = self.lexer.token()
+ while token != None and token[1] != ";":
+ token = self.lexer.token()
+ return token
+ elif token[0] == "name" and ignored_words.has_key(token[1]):
+ (n, info) = ignored_words[token[1]]
+ i = 0
+ while i < n:
+ token = self.lexer.token()
+ i = i + 1
+ token = self.lexer.token()
+ continue
+ else:
+ if debug:
+ print "=> ", token
+ return token
+ return None
#
# Parse a typedef, it records the type and its name.
#
def parseTypedef(self, token):
if token == None:
- return None
- token = self.parseType(token)
- if token == None:
- self.error("parsing typedef")
- return None
- base_type = self.type
- type = base_type
- #self.debug("end typedef type", token)
- while token != None:
- if token[0] == "name":
- name = token[1]
- signature = self.signature
- if signature != None:
- type = string.split(type, '(')[0]
- d = self.mergeFunctionComment(name,
- ((type, None), signature), 1)
- self.index_add(name, self.filename, not self.is_header,
- "functype", d)
- else:
- if base_type == "struct":
- self.index_add(name, self.filename, not self.is_header,
- "struct", type)
- base_type = "struct " + name
- else:
- # TODO report missing or misformatted comments
- info = self.parseTypeComment(name, 1)
- self.index_add(name, self.filename, not self.is_header,
- "typedef", type, info)
- token = self.token()
- else:
- self.error("parsing typedef: expecting a name")
- return token
- #self.debug("end typedef", token)
- if token != None and token[0] == 'sep' and token[1] == ',':
- type = base_type
- token = self.token()
- while token != None and token[0] == "op":
- type = type + token[1]
- token = self.token()
- elif token != None and token[0] == 'sep' and token[1] == ';':
- break;
- elif token != None and token[0] == 'name':
- type = base_type
- continue;
- else:
- self.error("parsing typedef: expecting ';'", token)
- return token
- token = self.token()
- return token
+ return None
+ token = self.parseType(token)
+ if token == None:
+ self.error("parsing typedef")
+ return None
+ base_type = self.type
+ type = base_type
+ #self.debug("end typedef type", token)
+ while token != None:
+ if token[0] == "name":
+ name = token[1]
+ signature = self.signature
+ if signature != None:
+ type = string.split(type, '(')[0]
+ d = self.mergeFunctionComment(name,
+ ((type, None), signature), 1)
+ self.index_add(name, self.filename, not self.is_header,
+ "functype", d)
+ else:
+ if base_type == "struct":
+ self.index_add(name, self.filename, not self.is_header,
+ "struct", type)
+ base_type = "struct " + name
+ else:
+ # TODO report missing or misformatted comments
+ info = self.parseTypeComment(name, 1)
+ self.index_add(name, self.filename, not self.is_header,
+ "typedef", type, info)
+ token = self.token()
+ else:
+ self.error("parsing typedef: expecting a name")
+ return token
+ #self.debug("end typedef", token)
+ if token != None and token[0] == 'sep' and token[1] == ',':
+ type = base_type
+ token = self.token()
+ while token != None and token[0] == "op":
+ type = type + token[1]
+ token = self.token()
+ elif token != None and token[0] == 'sep' and token[1] == ';':
+ break;
+ elif token != None and token[0] == 'name':
+ type = base_type
+ continue;
+ else:
+ self.error("parsing typedef: expecting ';'", token)
+ return token
+ token = self.token()
+ return token
#
# Parse a C code block, used for functions it parse till
@@ -1102,138 +1102,138 @@ class CParser:
#
def parseBlock(self, token):
while token != None:
- if token[0] == "sep" and token[1] == "{":
- token = self.token()
- token = self.parseBlock(token)
- elif token[0] == "sep" and token[1] == "}":
- self.comment = None
- token = self.token()
- return token
- else:
- if self.collect_ref == 1:
- oldtok = token
- token = self.token()
- if oldtok[0] == "name" and oldtok[1][0:3] == "vir":
- if token[0] == "sep" and token[1] == "(":
- self.index_add_ref(oldtok[1], self.filename,
- 0, "function")
- token = self.token()
- elif token[0] == "name":
- token = self.token()
- if token[0] == "sep" and (token[1] == ";" or
- token[1] == "," or token[1] == "="):
- self.index_add_ref(oldtok[1], self.filename,
- 0, "type")
- elif oldtok[0] == "name" and oldtok[1][0:4] == "XEN_":
- self.index_add_ref(oldtok[1], self.filename,
- 0, "typedef")
- elif oldtok[0] == "name" and oldtok[1][0:7] == "LIBXEN_":
- self.index_add_ref(oldtok[1], self.filename,
- 0, "typedef")
-
- else:
- token = self.token()
- return token
+ if token[0] == "sep" and token[1] == "{":
+ token = self.token()
+ token = self.parseBlock(token)
+ elif token[0] == "sep" and token[1] == "}":
+ self.comment = None
+ token = self.token()
+ return token
+ else:
+ if self.collect_ref == 1:
+ oldtok = token
+ token = self.token()
+ if oldtok[0] == "name" and oldtok[1][0:3] == "vir":
+ if token[0] == "sep" and token[1] == "(":
+ self.index_add_ref(oldtok[1], self.filename,
+ 0, "function")
+ token = self.token()
+ elif token[0] == "name":
+ token = self.token()
+ if token[0] == "sep" and (token[1] == ";" or
+ token[1] == "," or token[1] == "="):
+ self.index_add_ref(oldtok[1], self.filename,
+ 0, "type")
+ elif oldtok[0] == "name" and oldtok[1][0:4] == "XEN_":
+ self.index_add_ref(oldtok[1], self.filename,
+ 0, "typedef")
+ elif oldtok[0] == "name" and oldtok[1][0:7] == "LIBXEN_":
+ self.index_add_ref(oldtok[1], self.filename,
+ 0, "typedef")
+
+ else:
+ token = self.token()
+ return token
#
# Parse a C struct definition till the balancing }
#
def parseStruct(self, token):
fields = []
- #self.debug("start parseStruct", token)
+ #self.debug("start parseStruct", token)
while token != None:
- if token[0] == "sep" and token[1] == "{":
- token = self.token()
- token = self.parseTypeBlock(token)
- elif token[0] == "sep" and token[1] == "}":
- self.struct_fields = fields
- #self.debug("end parseStruct", token)
- #print fields
- token = self.token()
- return token
- else:
- base_type = self.type
- #self.debug("before parseType", token)
- token = self.parseType(token)
- #self.debug("after parseType", token)
- if token != None and token[0] == "name":
- fname = token[1]
- token = self.token()
- if token[0] == "sep" and token[1] == ";":
- self.comment = None
- token = self.token()
- fields.append((self.type, fname, self.comment))
- self.comment = None
- else:
- self.error("parseStruct: expecting ;", token)
- elif token != None and token[0] == "sep" and token[1] == "{":
- token = self.token()
- token = self.parseTypeBlock(token)
- if token != None and token[0] == "name":
- token = self.token()
- if token != None and token[0] == "sep" and token[1] == ";":
- token = self.token()
- else:
- self.error("parseStruct: expecting ;", token)
- else:
- self.error("parseStruct: name", token)
- token = self.token()
- self.type = base_type;
+ if token[0] == "sep" and token[1] == "{":
+ token = self.token()
+ token = self.parseTypeBlock(token)
+ elif token[0] == "sep" and token[1] == "}":
+ self.struct_fields = fields
+ #self.debug("end parseStruct", token)
+ #print fields
+ token = self.token()
+ return token
+ else:
+ base_type = self.type
+ #self.debug("before parseType", token)
+ token = self.parseType(token)
+ #self.debug("after parseType", token)
+ if token != None and token[0] == "name":
+ fname = token[1]
+ token = self.token()
+ if token[0] == "sep" and token[1] == ";":
+ self.comment = None
+ token = self.token()
+ fields.append((self.type, fname, self.comment))
+ self.comment = None
+ else:
+ self.error("parseStruct: expecting ;", token)
+ elif token != None and token[0] == "sep" and token[1] == "{":
+ token = self.token()
+ token = self.parseTypeBlock(token)
+ if token != None and token[0] == "name":
+ token = self.token()
+ if token != None and token[0] == "sep" and token[1] == ";":
+ token = self.token()
+ else:
+ self.error("parseStruct: expecting ;", token)
+ else:
+ self.error("parseStruct: name", token)
+ token = self.token()
+ self.type = base_type;
self.struct_fields = fields
- #self.debug("end parseStruct", token)
- #print fields
- return token
+ #self.debug("end parseStruct", token)
+ #print fields
+ return token
#
# Parse a C enum block, parse till the balancing }
#
def parseEnumBlock(self, token):
self.enums = []
- name = None
- self.comment = None
- comment = ""
- value = "0"
+ name = None
+ self.comment = None
+ comment = ""
+ value = "0"
while token != None:
- if token[0] == "sep" and token[1] == "{":
- token = self.token()
- token = self.parseTypeBlock(token)
- elif token[0] == "sep" and token[1] == "}":
- if name != None:
- if self.comment != None:
- comment = self.comment
- self.comment = None
- self.enums.append((name, value, comment))
- token = self.token()
- return token
- elif token[0] == "name":
- if name != None:
- if self.comment != None:
- comment = string.strip(self.comment)
- self.comment = None
- self.enums.append((name, value, comment))
- name = token[1]
- comment = ""
- token = self.token()
- if token[0] == "op" and token[1][0] == "=":
- value = ""
- if len(token[1]) > 1:
- value = token[1][1:]
- token = self.token()
- while token[0] != "sep" or (token[1] != ',' and
- token[1] != '}'):
- value = value + token[1]
- token = self.token()
- else:
- try:
- value = "%d" % (int(value) + 1)
- except:
- self.warning("Failed to compute value of enum %s" % (name))
- value=""
- if token[0] == "sep" and token[1] == ",":
- token = self.token()
- else:
- token = self.token()
- return token
+ if token[0] == "sep" and token[1] == "{":
+ token = self.token()
+ token = self.parseTypeBlock(token)
+ elif token[0] == "sep" and token[1] == "}":
+ if name != None:
+ if self.comment != None:
+ comment = self.comment
+ self.comment = None
+ self.enums.append((name, value, comment))
+ token = self.token()
+ return token
+ elif token[0] == "name":
+ if name != None:
+ if self.comment != None:
+ comment = string.strip(self.comment)
+ self.comment = None
+ self.enums.append((name, value, comment))
+ name = token[1]
+ comment = ""
+ token = self.token()
+ if token[0] == "op" and token[1][0] == "=":
+ value = ""
+ if len(token[1]) > 1:
+ value = token[1][1:]
+ token = self.token()
+ while token[0] != "sep" or (token[1] != ',' and
+ token[1] != '}'):
+ value = value + token[1]
+ token = self.token()
+ else:
+ try:
+ value = "%d" % (int(value) + 1)
+ except:
+ self.warning("Failed to compute value of enum %s" % (name))
+ value=""
+ if token[0] == "sep" and token[1] == ",":
+ token = self.token()
+ else:
+ token = self.token()
+ return token
#
# Parse a C definition block, used for structs it parse till
@@ -1241,15 +1241,15 @@ class CParser:
#
def parseTypeBlock(self, token):
while token != None:
- if token[0] == "sep" and token[1] == "{":
- token = self.token()
- token = self.parseTypeBlock(token)
- elif token[0] == "sep" and token[1] == "}":
- token = self.token()
- return token
- else:
- token = self.token()
- return token
+ if token[0] == "sep" and token[1] == "{":
+ token = self.token()
+ token = self.parseTypeBlock(token)
+ elif token[0] == "sep" and token[1] == "}":
+ token = self.token()
+ return token
+ else:
+ token = self.token()
+ return token
#
# Parse a type: the fact that the type name can either occur after
@@ -1258,221 +1258,221 @@ class CParser:
#
def parseType(self, token):
self.type = ""
- self.struct_fields = []
+ self.struct_fields = []
self.signature = None
- if token == None:
- return token
-
- while token[0] == "name" and (
- token[1] == "const" or \
- token[1] == "unsigned" or \
- token[1] == "signed"):
- if self.type == "":
- self.type = token[1]
- else:
- self.type = self.type + " " + token[1]
- token = self.token()
+ if token == None:
+ return token
+
+ while token[0] == "name" and (
+ token[1] == "const" or \
+ token[1] == "unsigned" or \
+ token[1] == "signed"):
+ if self.type == "":
+ self.type = token[1]
+ else:
+ self.type = self.type + " " + token[1]
+ token = self.token()
if token[0] == "name" and token[1] == "long":
- if self.type == "":
- self.type = token[1]
- else:
- self.type = self.type + " " + token[1]
-
- # some read ahead for long long
- oldtmp = token
- token = self.token()
- if token[0] == "name" and token[1] == "long":
- self.type = self.type + " " + token[1]
- else:
- self.push(token)
- token = oldtmp
-
- if token[0] == "name" and token[1] == "int":
- if self.type == "":
- self.type = tmp[1]
- else:
- self.type = self.type + " " + tmp[1]
+ if self.type == "":
+ self.type = token[1]
+ else:
+ self.type = self.type + " " + token[1]
+
+ # some read ahead for long long
+ oldtmp = token
+ token = self.token()
+ if token[0] == "name" and token[1] == "long":
+ self.type = self.type + " " + token[1]
+ else:
+ self.push(token)
+ token = oldtmp
+
+ if token[0] == "name" and token[1] == "int":
+ if self.type == "":
+ self.type = tmp[1]
+ else:
+ self.type = self.type + " " + tmp[1]
elif token[0] == "name" and token[1] == "short":
- if self.type == "":
- self.type = token[1]
- else:
- self.type = self.type + " " + token[1]
- if token[0] == "name" and token[1] == "int":
- if self.type == "":
- self.type = tmp[1]
- else:
- self.type = self.type + " " + tmp[1]
+ if self.type == "":
+ self.type = token[1]
+ else:
+ self.type = self.type + " " + token[1]
+ if token[0] == "name" and token[1] == "int":
+ if self.type == "":
+ self.type = tmp[1]
+ else:
+ self.type = self.type + " " + tmp[1]
elif token[0] == "name" and token[1] == "struct":
- if self.type == "":
- self.type = token[1]
- else:
- self.type = self.type + " " + token[1]
- token = self.token()
- nametok = None
- if token[0] == "name":
- nametok = token
- token = self.token()
- if token != None and token[0] == "sep" and token[1] == "{":
- token = self.token()
- token = self.parseStruct(token)
- elif token != None and token[0] == "op" and token[1] == "*":
- self.type = self.type + " " + nametok[1] + " *"
- token = self.token()
- while token != None and token[0] == "op" and token[1] == "*":
- self.type = self.type + " *"
- token = self.token()
- if token[0] == "name":
- nametok = token
- token = self.token()
- else:
- self.error("struct : expecting name", token)
- return token
- elif token != None and token[0] == "name" and nametok != None:
- self.type = self.type + " " + nametok[1]
- return token
-
- if nametok != None:
- self.lexer.push(token)
- token = nametok
- return token
+ if self.type == "":
+ self.type = token[1]
+ else:
+ self.type = self.type + " " + token[1]
+ token = self.token()
+ nametok = None
+ if token[0] == "name":
+ nametok = token
+ token = self.token()
+ if token != None and token[0] == "sep" and token[1] == "{":
+ token = self.token()
+ token = self.parseStruct(token)
+ elif token != None and token[0] == "op" and token[1] == "*":
+ self.type = self.type + " " + nametok[1] + " *"
+ token = self.token()
+ while token != None and token[0] == "op" and token[1] == "*":
+ self.type = self.type + " *"
+ token = self.token()
+ if token[0] == "name":
+ nametok = token
+ token = self.token()
+ else:
+ self.error("struct : expecting name", token)
+ return token
+ elif token != None and token[0] == "name" and nametok != None:
+ self.type = self.type + " " + nametok[1]
+ return token
+
+ if nametok != None:
+ self.lexer.push(token)
+ token = nametok
+ return token
elif token[0] == "name" and token[1] == "enum":
- if self.type == "":
- self.type = token[1]
- else:
- self.type = self.type + " " + token[1]
- self.enums = []
- token = self.token()
- if token != None and token[0] == "sep" and token[1] == "{":
- token = self.token()
- token = self.parseEnumBlock(token)
- else:
- self.error("parsing enum: expecting '{'", token)
- enum_type = None
- if token != None and token[0] != "name":
- self.lexer.push(token)
- token = ("name", "enum")
- else:
- enum_type = token[1]
- for enum in self.enums:
- self.index_add(enum[0], self.filename,
- not self.is_header, "enum",
- (enum[1], enum[2], enum_type))
- return token
-
- elif token[0] == "name":
- if self.type == "":
- self.type = token[1]
- else:
- self.type = self.type + " " + token[1]
- else:
- self.error("parsing type %s: expecting a name" % (self.type),
- token)
- return token
- token = self.token()
+ if self.type == "":
+ self.type = token[1]
+ else:
+ self.type = self.type + " " + token[1]
+ self.enums = []
+ token = self.token()
+ if token != None and token[0] == "sep" and token[1] == "{":
+ token = self.token()
+ token = self.parseEnumBlock(token)
+ else:
+ self.error("parsing enum: expecting '{'", token)
+ enum_type = None
+ if token != None and token[0] != "name":
+ self.lexer.push(token)
+ token = ("name", "enum")
+ else:
+ enum_type = token[1]
+ for enum in self.enums:
+ self.index_add(enum[0], self.filename,
+ not self.is_header, "enum",
+ (enum[1], enum[2], enum_type))
+ return token
+
+ elif token[0] == "name":
+ if self.type == "":
+ self.type = token[1]
+ else:
+ self.type = self.type + " " + token[1]
+ else:
+ self.error("parsing type %s: expecting a name" % (self.type),
+ token)
+ return token
+ token = self.token()
while token != None and (token[0] == "op" or
- token[0] == "name" and token[1] == "const"):
- self.type = self.type + " " + token[1]
- token = self.token()
-
- #
- # if there is a parenthesis here, this means a function type
- #
- if token != None and token[0] == "sep" and token[1] == '(':
- self.type = self.type + token[1]
- token = self.token()
- while token != None and token[0] == "op" and token[1] == '*':
- self.type = self.type + token[1]
- token = self.token()
- if token == None or token[0] != "name" :
- self.error("parsing function type, name expected", token);
- return token
- self.type = self.type + token[1]
- nametok = token
- token = self.token()
- if token != None and token[0] == "sep" and token[1] == ')':
- self.type = self.type + token[1]
- token = self.token()
- if token != None and token[0] == "sep" and token[1] == '(':
- token = self.token()
- type = self.type;
- token = self.parseSignature(token);
- self.type = type;
- else:
- self.error("parsing function type, '(' expected", token);
- return token
- else:
- self.error("parsing function type, ')' expected", token);
- return token
- self.lexer.push(token)
- token = nametok
- return token
+ token[0] == "name" and token[1] == "const"):
+ self.type = self.type + " " + token[1]
+ token = self.token()
#
- # do some lookahead for arrays
- #
- if token != None and token[0] == "name":
- nametok = token
- token = self.token()
- if token != None and token[0] == "sep" and token[1] == '[':
- self.type = self.type + nametok[1]
- while token != None and token[0] == "sep" and token[1] == '[':
- self.type = self.type + token[1]
- token = self.token()
- while token != None and token[0] != 'sep' and \
- token[1] != ']' and token[1] != ';':
- self.type = self.type + token[1]
- token = self.token()
- if token != None and token[0] == 'sep' and token[1] == ']':
- self.type = self.type + token[1]
- token = self.token()
- else:
- self.error("parsing array type, ']' expected", token);
- return token
- elif token != None and token[0] == "sep" and token[1] == ':':
- # remove :12 in case it's a limited int size
- token = self.token()
- token = self.token()
- self.lexer.push(token)
- token = nametok
-
- return token
+ # if there is a parenthesis here, this means a function type
+ #
+ if token != None and token[0] == "sep" and token[1] == '(':
+ self.type = self.type + token[1]
+ token = self.token()
+ while token != None and token[0] == "op" and token[1] == '*':
+ self.type = self.type + token[1]
+ token = self.token()
+ if token == None or token[0] != "name" :
+ self.error("parsing function type, name expected", token);
+ return token
+ self.type = self.type + token[1]
+ nametok = token
+ token = self.token()
+ if token != None and token[0] == "sep" and token[1] == ')':
+ self.type = self.type + token[1]
+ token = self.token()
+ if token != None and token[0] == "sep" and token[1] == '(':
+ token = self.token()
+ type = self.type;
+ token = self.parseSignature(token);
+ self.type = type;
+ else:
+ self.error("parsing function type, '(' expected", token);
+ return token
+ else:
+ self.error("parsing function type, ')' expected", token);
+ return token
+ self.lexer.push(token)
+ token = nametok
+ return token
+
+ #
+ # do some lookahead for arrays
+ #
+ if token != None and token[0] == "name":
+ nametok = token
+ token = self.token()
+ if token != None and token[0] == "sep" and token[1] == '[':
+ self.type = self.type + nametok[1]
+ while token != None and token[0] == "sep" and token[1] == '[':
+ self.type = self.type + token[1]
+ token = self.token()
+ while token != None and token[0] != 'sep' and \
+ token[1] != ']' and token[1] != ';':
+ self.type = self.type + token[1]
+ token = self.token()
+ if token != None and token[0] == 'sep' and token[1] == ']':
+ self.type = self.type + token[1]
+ token = self.token()
+ else:
+ self.error("parsing array type, ']' expected", token);
+ return token
+ elif token != None and token[0] == "sep" and token[1] == ':':
+ # remove :12 in case it's a limited int size
+ token = self.token()
+ token = self.token()
+ self.lexer.push(token)
+ token = nametok
+
+ return token
#
# Parse a signature: '(' has been parsed and we scan the type definition
# up to the ')' included
def parseSignature(self, token):
signature = []
- if token != None and token[0] == "sep" and token[1] == ')':
- self.signature = []
- token = self.token()
- return token
- while token != None:
- token = self.parseType(token)
- if token != None and token[0] == "name":
- signature.append((self.type, token[1], None))
- token = self.token()
- elif token != None and token[0] == "sep" and token[1] == ',':
- token = self.token()
- continue
- elif token != None and token[0] == "sep" and token[1] == ')':
- # only the type was provided
- if self.type == "...":
- signature.append((self.type, "...", None))
- else:
- signature.append((self.type, None, None))
- if token != None and token[0] == "sep":
- if token[1] == ',':
- token = self.token()
- continue
- elif token[1] == ')':
- token = self.token()
- break
- self.signature = signature
- return token
+ if token != None and token[0] == "sep" and token[1] == ')':
+ self.signature = []
+ token = self.token()
+ return token
+ while token != None:
+ token = self.parseType(token)
+ if token != None and token[0] == "name":
+ signature.append((self.type, token[1], None))
+ token = self.token()
+ elif token != None and token[0] == "sep" and token[1] == ',':
+ token = self.token()
+ continue
+ elif token != None and token[0] == "sep" and token[1] == ')':
+ # only the type was provided
+ if self.type == "...":
+ signature.append((self.type, "...", None))
+ else:
+ signature.append((self.type, None, None))
+ if token != None and token[0] == "sep":
+ if token[1] == ',':
+ token = self.token()
+ continue
+ elif token[1] == ')':
+ token = self.token()
+ break
+ self.signature = signature
+ return token
#
# Parse a global definition, be it a type, variable or function
@@ -1481,134 +1481,134 @@ class CParser:
def parseGlobal(self, token):
static = 0
if token[1] == 'extern':
- token = self.token()
- if token == None:
- return token
- if token[0] == 'string':
- if token[1] == 'C':
- token = self.token()
- if token == None:
- return token
- if token[0] == 'sep' and token[1] == "{":
- token = self.token()
-# print 'Entering extern "C line ', self.lineno()
- while token != None and (token[0] != 'sep' or
- token[1] != "}"):
- if token[0] == 'name':
- token = self.parseGlobal(token)
- else:
- self.error(
- "token %s %s unexpected at the top level" % (
- token[0], token[1]))
- token = self.parseGlobal(token)
-# print 'Exiting extern "C" line', self.lineno()
- token = self.token()
- return token
- else:
- return token
- elif token[1] == 'static':
- static = 1
- token = self.token()
- if token == None or token[0] != 'name':
- return token
-
- if token[1] == 'typedef':
- token = self.token()
- return self.parseTypedef(token)
- else:
- token = self.parseType(token)
- type_orig = self.type
- if token == None or token[0] != "name":
- return token
- type = type_orig
- self.name = token[1]
- token = self.token()
- while token != None and (token[0] == "sep" or token[0] == "op"):
- if token[0] == "sep":
- if token[1] == "[":
- type = type + token[1]
- token = self.token()
- while token != None and (token[0] != "sep" or \
- token[1] != ";"):
- type = type + token[1]
- token = self.token()
-
- if token != None and token[0] == "op" and token[1] == "=":
- #
- # Skip the initialization of the variable
- #
- token = self.token()
- if token[0] == 'sep' and token[1] == '{':
- token = self.token()
- token = self.parseBlock(token)
- else:
- self.comment = None
- while token != None and (token[0] != "sep" or \
- (token[1] != ';' and token[1] != ',')):
- token = self.token()
- self.comment = None
- if token == None or token[0] != "sep" or (token[1] != ';' and
- token[1] != ','):
- self.error("missing ';' or ',' after value")
-
- if token != None and token[0] == "sep":
- if token[1] == ";":
- self.comment = None
- token = self.token()
- if type == "struct":
- self.index_add(self.name, self.filename,
- not self.is_header, "struct", self.struct_fields)
- else:
- self.index_add(self.name, self.filename,
- not self.is_header, "variable", type)
- break
- elif token[1] == "(":
- token = self.token()
- token = self.parseSignature(token)
- if token == None:
- return None
- if token[0] == "sep" and token[1] == ";":
- d = self.mergeFunctionComment(self.name,
- ((type, None), self.signature), 1)
- self.index_add(self.name, self.filename, static,
- "function", d)
- token = self.token()
- elif token[0] == "sep" and token[1] == "{":
- d = self.mergeFunctionComment(self.name,
- ((type, None), self.signature), static)
- self.index_add(self.name, self.filename, static,
- "function", d)
- token = self.token()
- token = self.parseBlock(token);
- elif token[1] == ',':
- self.comment = None
- self.index_add(self.name, self.filename, static,
- "variable", type)
- type = type_orig
- token = self.token()
- while token != None and token[0] == "sep":
- type = type + token[1]
- token = self.token()
- if token != None and token[0] == "name":
- self.name = token[1]
- token = self.token()
- else:
- break
-
- return token
+ token = self.token()
+ if token == None:
+ return token
+ if token[0] == 'string':
+ if token[1] == 'C':
+ token = self.token()
+ if token == None:
+ return token
+ if token[0] == 'sep' and token[1] == "{":
+ token = self.token()
+# print 'Entering extern "C line ', self.lineno()
+ while token != None and (token[0] != 'sep' or
+ token[1] != "}"):
+ if token[0] == 'name':
+ token = self.parseGlobal(token)
+ else:
+ self.error(
+ "token %s %s unexpected at the top level" % (
+ token[0], token[1]))
+ token = self.parseGlobal(token)
+# print 'Exiting extern "C" line', self.lineno()
+ token = self.token()
+ return token
+ else:
+ return token
+ elif token[1] == 'static':
+ static = 1
+ token = self.token()
+ if token == None or token[0] != 'name':
+ return token
+
+ if token[1] == 'typedef':
+ token = self.token()
+ return self.parseTypedef(token)
+ else:
+ token = self.parseType(token)
+ type_orig = self.type
+ if token == None or token[0] != "name":
+ return token
+ type = type_orig
+ self.name = token[1]
+ token = self.token()
+ while token != None and (token[0] == "sep" or token[0] == "op"):
+ if token[0] == "sep":
+ if token[1] == "[":
+ type = type + token[1]
+ token = self.token()
+ while token != None and (token[0] != "sep" or \
+ token[1] != ";"):
+ type = type + token[1]
+ token = self.token()
+
+ if token != None and token[0] == "op" and token[1] == "=":
+ #
+ # Skip the initialization of the variable
+ #
+ token = self.token()
+ if token[0] == 'sep' and token[1] == '{':
+ token = self.token()
+ token = self.parseBlock(token)
+ else:
+ self.comment = None
+ while token != None and (token[0] != "sep" or \
+ (token[1] != ';' and token[1] != ',')):
+ token = self.token()
+ self.comment = None
+ if token == None or token[0] != "sep" or (token[1] != ';' and
+ token[1] != ','):
+ self.error("missing ';' or ',' after value")
+
+ if token != None and token[0] == "sep":
+ if token[1] == ";":
+ self.comment = None
+ token = self.token()
+ if type == "struct":
+ self.index_add(self.name, self.filename,
+ not self.is_header, "struct", self.struct_fields)
+ else:
+ self.index_add(self.name, self.filename,
+ not self.is_header, "variable", type)
+ break
+ elif token[1] == "(":
+ token = self.token()
+ token = self.parseSignature(token)
+ if token == None:
+ return None
+ if token[0] == "sep" and token[1] == ";":
+ d = self.mergeFunctionComment(self.name,
+ ((type, None), self.signature), 1)
+ self.index_add(self.name, self.filename, static,
+ "function", d)
+ token = self.token()
+ elif token[0] == "sep" and token[1] == "{":
+ d = self.mergeFunctionComment(self.name,
+ ((type, None), self.signature), static)
+ self.index_add(self.name, self.filename, static,
+ "function", d)
+ token = self.token()
+ token = self.parseBlock(token);
+ elif token[1] == ',':
+ self.comment = None
+ self.index_add(self.name, self.filename, static,
+ "variable", type)
+ type = type_orig
+ token = self.token()
+ while token != None and token[0] == "sep":
+ type = type + token[1]
+ token = self.token()
+ if token != None and token[0] == "name":
+ self.name = token[1]
+ token = self.token()
+ else:
+ break
+
+ return token
def parse(self):
self.warning("Parsing %s" % (self.filename))
token = self.token()
- while token != None:
+ while token != None:
if token[0] == 'name':
- token = self.parseGlobal(token)
+ token = self.parseGlobal(token)
else:
- self.error("token %s %s unexpected at the top level" % (
- token[0], token[1]))
- token = self.parseGlobal(token)
- return
- self.parseTopComment(self.top_comment)
+ self.error("token %s %s unexpected at the top level" % (
+ token[0], token[1]))
+ token = self.parseGlobal(token)
+ return
+ self.parseTopComment(self.top_comment)
return self.index
@@ -1618,449 +1618,449 @@ class docBuilder:
self.name = name
self.path = path
self.directories = directories
- self.includes = includes + included_files.keys()
- self.modules = {}
- self.headers = {}
- self.idx = index()
+ self.includes = includes + included_files.keys()
+ self.modules = {}
+ self.headers = {}
+ self.idx = index()
self.xref = {}
- self.index = {}
- self.basename = name
+ self.index = {}
+ self.basename = name
def indexString(self, id, str):
- if str == None:
- return
- str = string.replace(str, "'", ' ')
- str = string.replace(str, '"', ' ')
- str = string.replace(str, "/", ' ')
- str = string.replace(str, '*', ' ')
- str = string.replace(str, "[", ' ')
- str = string.replace(str, "]", ' ')
- str = string.replace(str, "(", ' ')
- str = string.replace(str, ")", ' ')
- str = string.replace(str, "<", ' ')
- str = string.replace(str, '>', ' ')
- str = string.replace(str, "&", ' ')
- str = string.replace(str, '#', ' ')
- str = string.replace(str, ",", ' ')
- str = string.replace(str, '.', ' ')
- str = string.replace(str, ';', ' ')
- tokens = string.split(str)
- for token in tokens:
- try:
- c = token[0]
- if string.find(string.letters, c) < 0:
- pass
- elif len(token) < 3:
- pass
- else:
- lower = string.lower(token)
- # TODO: generalize this a bit
- if lower == 'and' or lower == 'the':
- pass
- elif self.xref.has_key(token):
- self.xref[token].append(id)
- else:
- self.xref[token] = [id]
- except:
- pass
+ if str == None:
+ return
+ str = string.replace(str, "'", ' ')
+ str = string.replace(str, '"', ' ')
+ str = string.replace(str, "/", ' ')
+ str = string.replace(str, '*', ' ')
+ str = string.replace(str, "[", ' ')
+ str = string.replace(str, "]", ' ')
+ str = string.replace(str, "(", ' ')
+ str = string.replace(str, ")", ' ')
+ str = string.replace(str, "<", ' ')
+ str = string.replace(str, '>', ' ')
+ str = string.replace(str, "&", ' ')
+ str = string.replace(str, '#', ' ')
+ str = string.replace(str, ",", ' ')
+ str = string.replace(str, '.', ' ')
+ str = string.replace(str, ';', ' ')
+ tokens = string.split(str)
+ for token in tokens:
+ try:
+ c = token[0]
+ if string.find(string.letters, c) < 0:
+ pass
+ elif len(token) < 3:
+ pass
+ else:
+ lower = string.lower(token)
+ # TODO: generalize this a bit
+ if lower == 'and' or lower == 'the':
+ pass
+ elif self.xref.has_key(token):
+ self.xref[token].append(id)
+ else:
+ self.xref[token] = [id]
+ except:
+ pass
def analyze(self):
print "Project %s : %d headers, %d modules" % (self.name, len(self.headers.keys()), len(self.modules.keys()))
- self.idx.analyze()
+ self.idx.analyze()
def scanHeaders(self):
- for header in self.headers.keys():
- parser = CParser(header)
- idx = parser.parse()
- self.headers[header] = idx;
- self.idx.merge(idx)
+ for header in self.headers.keys():
+ parser = CParser(header)
+ idx = parser.parse()
+ self.headers[header] = idx;
+ self.idx.merge(idx)
def scanModules(self):
- for module in self.modules.keys():
- parser = CParser(module)
- idx = parser.parse()
- # idx.analyze()
- self.modules[module] = idx
- self.idx.merge_public(idx)
+ for module in self.modules.keys():
+ parser = CParser(module)
+ idx = parser.parse()
+ # idx.analyze()
+ self.modules[module] = idx
+ self.idx.merge_public(idx)
def scan(self):
for directory in self.directories:
- files = glob.glob(directory + "/*.c")
- for file in files:
- skip = 1
- for incl in self.includes:
- if string.find(file, incl) != -1:
- skip = 0;
- break
- if skip == 0:
- self.modules[file] = None;
- files = glob.glob(directory + "/*.h")
- for file in files:
- skip = 1
- for incl in self.includes:
- if string.find(file, incl) != -1:
- skip = 0;
- break
- if skip == 0:
- self.headers[file] = None;
- self.scanHeaders()
- self.scanModules()
+ files = glob.glob(directory + "/*.c")
+ for file in files:
+ skip = 1
+ for incl in self.includes:
+ if string.find(file, incl) != -1:
+ skip = 0;
+ break
+ if skip == 0:
+ self.modules[file] = None;
+ files = glob.glob(directory + "/*.h")
+ for file in files:
+ skip = 1
+ for incl in self.includes:
+ if string.find(file, incl) != -1:
+ skip = 0;
+ break
+ if skip == 0:
+ self.headers[file] = None;
+ self.scanHeaders()
+ self.scanModules()
def modulename_file(self, file):
module = os.path.basename(file)
- if module[-2:] == '.h':
- module = module[:-2]
- elif module[-2:] == '.c':
- module = module[:-2]
- return module
+ if module[-2:] == '.h':
+ module = module[:-2]
+ elif module[-2:] == '.c':
+ module = module[:-2]
+ return module
def serialize_enum(self, output, name):
id = self.idx.enums[name]
output.write(" <enum name='%s' file='%s'" % (name,
- self.modulename_file(id.header)))
- if id.info != None:
- info = id.info
- if info[0] != None and info[0] != '':
- try:
- val = eval(info[0])
- except:
- val = info[0]
- output.write(" value='%s'" % (val));
- if info[2] != None and info[2] != '':
- output.write(" type='%s'" % info[2]);
- if info[1] != None and info[1] != '':
- output.write(" info='%s'" % escape(info[1]));
+ self.modulename_file(id.header)))
+ if id.info != None:
+ info = id.info
+ if info[0] != None and info[0] != '':
+ try:
+ val = eval(info[0])
+ except:
+ val = info[0]
+ output.write(" value='%s'" % (val));
+ if info[2] != None and info[2] != '':
+ output.write(" type='%s'" % info[2]);
+ if info[1] != None and info[1] != '':
+ output.write(" info='%s'" % escape(info[1]));
output.write("/>\n")
def serialize_macro(self, output, name):
id = self.idx.macros[name]
output.write(" <macro name='%s' file='%s'>\n" % (name,
- self.modulename_file(id.header)))
- if id.info != None:
+ self.modulename_file(id.header)))
+ if id.info != None:
try:
- (args, desc) = id.info
- if desc != None and desc != "":
- output.write(" <info><![CDATA[%s]]></info>\n" % (desc))
- self.indexString(name, desc)
- for arg in args:
- (name, desc) = arg
- if desc != None and desc != "":
- output.write(" <arg name='%s' info='%s'/>\n" % (
- name, escape(desc)))
- self.indexString(name, desc)
- else:
- output.write(" <arg name='%s'/>\n" % (name))
+ (args, desc) = id.info
+ if desc != None and desc != "":
+ output.write(" <info><![CDATA[%s]]></info>\n" % (desc))
+ self.indexString(name, desc)
+ for arg in args:
+ (name, desc) = arg
+ if desc != None and desc != "":
+ output.write(" <arg name='%s' info='%s'/>\n" % (
+ name, escape(desc)))
+ self.indexString(name, desc)
+ else:
+ output.write(" <arg name='%s'/>\n" % (name))
except:
pass
output.write(" </macro>\n")
def serialize_typedef(self, output, name):
id = self.idx.typedefs[name]
- if id.info[0:7] == 'struct ':
- output.write(" <struct name='%s' file='%s' type='%s'" % (
- name, self.modulename_file(id.header), id.info))
- name = id.info[7:]
- if self.idx.structs.has_key(name) and ( \
- type(self.idx.structs[name].info) == type(()) or
- type(self.idx.structs[name].info) == type([])):
- output.write(">\n");
- try:
- for field in self.idx.structs[name].info:
- desc = field[2]
- self.indexString(name, desc)
- if desc == None:
- desc = ''
- else:
- desc = escape(desc)
- output.write(" <field name='%s' type='%s' info='%s'/>\n" % (field[1] , field[0], desc))
- except:
- print "Failed to serialize struct %s" % (name)
- output.write(" </struct>\n")
- else:
- output.write("/>\n");
- else :
- output.write(" <typedef name='%s' file='%s' type='%s'" % (
- name, self.modulename_file(id.header), id.info))
+ if id.info[0:7] == 'struct ':
+ output.write(" <struct name='%s' file='%s' type='%s'" % (
+ name, self.modulename_file(id.header), id.info))
+ name = id.info[7:]
+ if self.idx.structs.has_key(name) and ( \
+ type(self.idx.structs[name].info) == type(()) or
+ type(self.idx.structs[name].info) == type([])):
+ output.write(">\n");
+ try:
+ for field in self.idx.structs[name].info:
+ desc = field[2]
+ self.indexString(name, desc)
+ if desc == None:
+ desc = ''
+ else:
+ desc = escape(desc)
+ output.write(" <field name='%s' type='%s' info='%s'/>\n" % (field[1] , field[0], desc))
+ except:
+ print "Failed to serialize struct %s" % (name)
+ output.write(" </struct>\n")
+ else:
+ output.write("/>\n");
+ else :
+ output.write(" <typedef name='%s' file='%s' type='%s'" % (
+ name, self.modulename_file(id.header), id.info))
try:
- desc = id.extra
- if desc != None and desc != "":
- output.write(">\n <info><![CDATA[%s]]></info>\n" % (desc))
- output.write(" </typedef>\n")
- else:
- output.write("/>\n")
- except:
- output.write("/>\n")
+ desc = id.extra
+ if desc != None and desc != "":
+ output.write(">\n <info><![CDATA[%s]]></info>\n" % (desc))
+ output.write(" </typedef>\n")
+ else:
+ output.write("/>\n")
+ except:
+ output.write("/>\n")
def serialize_variable(self, output, name):
id = self.idx.variables[name]
- if id.info != None:
- output.write(" <variable name='%s' file='%s' type='%s'/>\n" % (
- name, self.modulename_file(id.header), id.info))
- else:
- output.write(" <variable name='%s' file='%s'/>\n" % (
- name, self.modulename_file(id.header)))
+ if id.info != None:
+ output.write(" <variable name='%s' file='%s' type='%s'/>\n" % (
+ name, self.modulename_file(id.header), id.info))
+ else:
+ output.write(" <variable name='%s' file='%s'/>\n" % (
+ name, self.modulename_file(id.header)))
def serialize_function(self, output, name):
id = self.idx.functions[name]
- if name == debugsym:
- print "=>", id
+ if name == debugsym:
+ print "=>", id
output.write(" <%s name='%s' file='%s' module='%s'>\n" % (id.type,
- name, self.modulename_file(id.header),
- self.modulename_file(id.module)))
- #
- # Processing of conditionals modified by Bill 1/1/05
- #
- if id.conditionals != None:
- apstr = ""
- for cond in id.conditionals:
- if apstr != "":
- apstr = apstr + " && "
- apstr = apstr + cond
- output.write(" <cond>%s</cond>\n"% (apstr));
- try:
- (ret, params, desc) = id.info
- output.write(" <info><![CDATA[%s]]></info>\n" % (desc))
- self.indexString(name, desc)
- if ret[0] != None:
- if ret[0] == "void":
- output.write(" <return type='void'/>\n")
- else:
- output.write(" <return type='%s' info='%s'/>\n" % (
- ret[0], escape(ret[1])))
- self.indexString(name, ret[1])
- for param in params:
- if param[0] == 'void':
- continue
- if param[2] == None:
- output.write(" <arg name='%s' type='%s' info=''/>\n" % (param[1], param[0]))
- else:
- output.write(" <arg name='%s' type='%s' info='%s'/>\n" % (param[1], param[0], escape(param[2])))
- self.indexString(name, param[2])
- except:
- print "Failed to save function %s info: " % name, `id.info`
+ name, self.modulename_file(id.header),
+ self.modulename_file(id.module)))
+ #
+ # Processing of conditionals modified by Bill 1/1/05
+ #
+ if id.conditionals != None:
+ apstr = ""
+ for cond in id.conditionals:
+ if apstr != "":
+ apstr = apstr + " && "
+ apstr = apstr + cond
+ output.write(" <cond>%s</cond>\n"% (apstr));
+ try:
+ (ret, params, desc) = id.info
+ output.write(" <info><![CDATA[%s]]></info>\n" % (desc))
+ self.indexString(name, desc)
+ if ret[0] != None:
+ if ret[0] == "void":
+ output.write(" <return type='void'/>\n")
+ else:
+ output.write(" <return type='%s' info='%s'/>\n" % (
+ ret[0], escape(ret[1])))
+ self.indexString(name, ret[1])
+ for param in params:
+ if param[0] == 'void':
+ continue
+ if param[2] == None:
+ output.write(" <arg name='%s' type='%s' info=''/>\n" % (param[1], param[0]))
+ else:
+ output.write(" <arg name='%s' type='%s' info='%s'/>\n" % (param[1], param[0], escape(param[2])))
+ self.indexString(name, param[2])
+ except:
+ print "Failed to save function %s info: " % name, `id.info`
output.write(" </%s>\n" % (id.type))
def serialize_exports(self, output, file):
module = self.modulename_file(file)
- output.write(" <file name='%s'>\n" % (module))
- dict = self.headers[file]
- if dict.info != None:
- for data in ('Summary', 'Description', 'Author'):
- try:
- output.write(" <%s>%s</%s>\n" % (
- string.lower(data),
- escape(dict.info[data]),
- string.lower(data)))
- except:
- print "Header %s lacks a %s description" % (module, data)
- if dict.info.has_key('Description'):
- desc = dict.info['Description']
- if string.find(desc, "DEPRECATED") != -1:
- output.write(" <deprecated/>\n")
+ output.write(" <file name='%s'>\n" % (module))
+ dict = self.headers[file]
+ if dict.info != None:
+ for data in ('Summary', 'Description', 'Author'):
+ try:
+ output.write(" <%s>%s</%s>\n" % (
+ string.lower(data),
+ escape(dict.info[data]),
+ string.lower(data)))
+ except:
+ print "Header %s lacks a %s description" % (module, data)
+ if dict.info.has_key('Description'):
+ desc = dict.info['Description']
+ if string.find(desc, "DEPRECATED") != -1:
+ output.write(" <deprecated/>\n")
ids = dict.macros.keys()
- ids.sort()
- for id in uniq(ids):
- # Macros are sometime used to masquerade other types.
- if dict.functions.has_key(id):
- continue
- if dict.variables.has_key(id):
- continue
- if dict.typedefs.has_key(id):
- continue
- if dict.structs.has_key(id):
- continue
- if dict.enums.has_key(id):
- continue
- output.write(" <exports symbol='%s' type='macro'/>\n" % (id))
+ ids.sort()
+ for id in uniq(ids):
+ # Macros are sometime used to masquerade other types.
+ if dict.functions.has_key(id):
+ continue
+ if dict.variables.has_key(id):
+ continue
+ if dict.typedefs.has_key(id):
+ continue
+ if dict.structs.has_key(id):
+ continue
+ if dict.enums.has_key(id):
+ continue
+ output.write(" <exports symbol='%s' type='macro'/>\n" % (id))
ids = dict.enums.keys()
- ids.sort()
- for id in uniq(ids):
- output.write(" <exports symbol='%s' type='enum'/>\n" % (id))
+ ids.sort()
+ for id in uniq(ids):
+ output.write(" <exports symbol='%s' type='enum'/>\n" % (id))
ids = dict.typedefs.keys()
- ids.sort()
- for id in uniq(ids):
- output.write(" <exports symbol='%s' type='typedef'/>\n" % (id))
+ ids.sort()
+ for id in uniq(ids):
+ output.write(" <exports symbol='%s' type='typedef'/>\n" % (id))
ids = dict.structs.keys()
- ids.sort()
- for id in uniq(ids):
- output.write(" <exports symbol='%s' type='struct'/>\n" % (id))
+ ids.sort()
+ for id in uniq(ids):
+ output.write(" <exports symbol='%s' type='struct'/>\n" % (id))
ids = dict.variables.keys()
- ids.sort()
- for id in uniq(ids):
- output.write(" <exports symbol='%s' type='variable'/>\n" % (id))
+ ids.sort()
+ for id in uniq(ids):
+ output.write(" <exports symbol='%s' type='variable'/>\n" % (id))
ids = dict.functions.keys()
- ids.sort()
- for id in uniq(ids):
- output.write(" <exports symbol='%s' type='function'/>\n" % (id))
- output.write(" </file>\n")
+ ids.sort()
+ for id in uniq(ids):
+ output.write(" <exports symbol='%s' type='function'/>\n" % (id))
+ output.write(" </file>\n")
def serialize_xrefs_files(self, output):
headers = self.headers.keys()
headers.sort()
for file in headers:
- module = self.modulename_file(file)
- output.write(" <file name='%s'>\n" % (module))
- dict = self.headers[file]
- ids = uniq(dict.functions.keys() + dict.variables.keys() + \
- dict.macros.keys() + dict.typedefs.keys() + \
- dict.structs.keys() + dict.enums.keys())
- ids.sort()
- for id in ids:
- output.write(" <ref name='%s'/>\n" % (id))
- output.write(" </file>\n")
+ module = self.modulename_file(file)
+ output.write(" <file name='%s'>\n" % (module))
+ dict = self.headers[file]
+ ids = uniq(dict.functions.keys() + dict.variables.keys() + \
+ dict.macros.keys() + dict.typedefs.keys() + \
+ dict.structs.keys() + dict.enums.keys())
+ ids.sort()
+ for id in ids:
+ output.write(" <ref name='%s'/>\n" % (id))
+ output.write(" </file>\n")
pass
def serialize_xrefs_functions(self, output):
funcs = {}
- for name in self.idx.functions.keys():
- id = self.idx.functions[name]
- try:
- (ret, params, desc) = id.info
- for param in params:
- if param[0] == 'void':
- continue
- if funcs.has_key(param[0]):
- funcs[param[0]].append(name)
- else:
- funcs[param[0]] = [name]
- except:
- pass
- typ = funcs.keys()
- typ.sort()
- for type in typ:
- if type == '' or type == 'void' or type == "int" or \
- type == "char *" or type == "const char *" :
- continue
- output.write(" <type name='%s'>\n" % (type))
- ids = funcs[type]
- ids.sort()
- pid = '' # not sure why we have dups, but get rid of them!
- for id in ids:
- if id != pid:
- output.write(" <ref name='%s'/>\n" % (id))
- pid = id
- output.write(" </type>\n")
+ for name in self.idx.functions.keys():
+ id = self.idx.functions[name]
+ try:
+ (ret, params, desc) = id.info
+ for param in params:
+ if param[0] == 'void':
+ continue
+ if funcs.has_key(param[0]):
+ funcs[param[0]].append(name)
+ else:
+ funcs[param[0]] = [name]
+ except:
+ pass
+ typ = funcs.keys()
+ typ.sort()
+ for type in typ:
+ if type == '' or type == 'void' or type == "int" or \
+ type == "char *" or type == "const char *" :
+ continue
+ output.write(" <type name='%s'>\n" % (type))
+ ids = funcs[type]
+ ids.sort()
+ pid = '' # not sure why we have dups, but get rid of them!
+ for id in ids:
+ if id != pid:
+ output.write(" <ref name='%s'/>\n" % (id))
+ pid = id
+ output.write(" </type>\n")
def serialize_xrefs_constructors(self, output):
funcs = {}
- for name in self.idx.functions.keys():
- id = self.idx.functions[name]
- try:
- (ret, params, desc) = id.info
- if ret[0] == "void":
- continue
- if funcs.has_key(ret[0]):
- funcs[ret[0]].append(name)
- else:
- funcs[ret[0]] = [name]
- except:
- pass
- typ = funcs.keys()
- typ.sort()
- for type in typ:
- if type == '' or type == 'void' or type == "int" or \
- type == "char *" or type == "const char *" :
- continue
- output.write(" <type name='%s'>\n" % (type))
- ids = funcs[type]
- ids.sort()
- for id in ids:
- output.write(" <ref name='%s'/>\n" % (id))
- output.write(" </type>\n")
+ for name in self.idx.functions.keys():
+ id = self.idx.functions[name]
+ try:
+ (ret, params, desc) = id.info
+ if ret[0] == "void":
+ continue
+ if funcs.has_key(ret[0]):
+ funcs[ret[0]].append(name)
+ else:
+ funcs[ret[0]] = [name]
+ except:
+ pass
+ typ = funcs.keys()
+ typ.sort()
+ for type in typ:
+ if type == '' or type == 'void' or type == "int" or \
+ type == "char *" or type == "const char *" :
+ continue
+ output.write(" <type name='%s'>\n" % (type))
+ ids = funcs[type]
+ ids.sort()
+ for id in ids:
+ output.write(" <ref name='%s'/>\n" % (id))
+ output.write(" </type>\n")
def serialize_xrefs_alpha(self, output):
- letter = None
- ids = self.idx.identifiers.keys()
- ids.sort()
- for id in ids:
- if id[0] != letter:
- if letter != None:
- output.write(" </letter>\n")
- letter = id[0]
- output.write(" <letter name='%s'>\n" % (letter))
- output.write(" <ref name='%s'/>\n" % (id))
- if letter != None:
- output.write(" </letter>\n")
+ letter = None
+ ids = self.idx.identifiers.keys()
+ ids.sort()
+ for id in ids:
+ if id[0] != letter:
+ if letter != None:
+ output.write(" </letter>\n")
+ letter = id[0]
+ output.write(" <letter name='%s'>\n" % (letter))
+ output.write(" <ref name='%s'/>\n" % (id))
+ if letter != None:
+ output.write(" </letter>\n")
def serialize_xrefs_references(self, output):
typ = self.idx.identifiers.keys()
- typ.sort()
- for id in typ:
- idf = self.idx.identifiers[id]
- module = idf.header
- output.write(" <reference name='%s' href='%s'/>\n" % (id,
- 'html/' + self.basename + '-' +
- self.modulename_file(module) + '.html#' +
- id))
+ typ.sort()
+ for id in typ:
+ idf = self.idx.identifiers[id]
+ module = idf.header
+ output.write(" <reference name='%s' href='%s'/>\n" % (id,
+ 'html/' + self.basename + '-' +
+ self.modulename_file(module) + '.html#' +
+ id))
def serialize_xrefs_index(self, output):
index = self.xref
- typ = index.keys()
- typ.sort()
- letter = None
- count = 0
- chunk = 0
- chunks = []
- for id in typ:
- if len(index[id]) > 30:
- continue
- if id[0] != letter:
- if letter == None or count > 200:
- if letter != None:
- output.write(" </letter>\n")
- output.write(" </chunk>\n")
- count = 0
- chunks.append(["chunk%s" % (chunk -1), first_letter, letter])
- output.write(" <chunk name='chunk%s'>\n" % (chunk))
- first_letter = id[0]
- chunk = chunk + 1
- elif letter != None:
- output.write(" </letter>\n")
- letter = id[0]
- output.write(" <letter name='%s'>\n" % (letter))
- output.write(" <word name='%s'>\n" % (id))
- tokens = index[id];
- tokens.sort()
- tok = None
- for token in tokens:
- if tok == token:
- continue
- tok = token
- output.write(" <ref name='%s'/>\n" % (token))
- count = count + 1
- output.write(" </word>\n")
- if letter != None:
- output.write(" </letter>\n")
- output.write(" </chunk>\n")
- if count != 0:
- chunks.append(["chunk%s" % (chunk -1), first_letter, letter])
- output.write(" <chunks>\n")
- for ch in chunks:
- output.write(" <chunk name='%s' start='%s' end='%s'/>\n" % (
- ch[0], ch[1], ch[2]))
- output.write(" </chunks>\n")
+ typ = index.keys()
+ typ.sort()
+ letter = None
+ count = 0
+ chunk = 0
+ chunks = []
+ for id in typ:
+ if len(index[id]) > 30:
+ continue
+ if id[0] != letter:
+ if letter == None or count > 200:
+ if letter != None:
+ output.write(" </letter>\n")
+ output.write(" </chunk>\n")
+ count = 0
+ chunks.append(["chunk%s" % (chunk -1), first_letter, letter])
+ output.write(" <chunk name='chunk%s'>\n" % (chunk))
+ first_letter = id[0]
+ chunk = chunk + 1
+ elif letter != None:
+ output.write(" </letter>\n")
+ letter = id[0]
+ output.write(" <letter name='%s'>\n" % (letter))
+ output.write(" <word name='%s'>\n" % (id))
+ tokens = index[id];
+ tokens.sort()
+ tok = None
+ for token in tokens:
+ if tok == token:
+ continue
+ tok = token
+ output.write(" <ref name='%s'/>\n" % (token))
+ count = count + 1
+ output.write(" </word>\n")
+ if letter != None:
+ output.write(" </letter>\n")
+ output.write(" </chunk>\n")
+ if count != 0:
+ chunks.append(["chunk%s" % (chunk -1), first_letter, letter])
+ output.write(" <chunks>\n")
+ for ch in chunks:
+ output.write(" <chunk name='%s' start='%s' end='%s'/>\n" % (
+ ch[0], ch[1], ch[2]))
+ output.write(" </chunks>\n")
def serialize_xrefs(self, output):
- output.write(" <references>\n")
- self.serialize_xrefs_references(output)
- output.write(" </references>\n")
- output.write(" <alpha>\n")
- self.serialize_xrefs_alpha(output)
- output.write(" </alpha>\n")
- output.write(" <constructors>\n")
- self.serialize_xrefs_constructors(output)
- output.write(" </constructors>\n")
- output.write(" <functions>\n")
- self.serialize_xrefs_functions(output)
- output.write(" </functions>\n")
- output.write(" <files>\n")
- self.serialize_xrefs_files(output)
- output.write(" </files>\n")
- output.write(" <index>\n")
- self.serialize_xrefs_index(output)
- output.write(" </index>\n")
+ output.write(" <references>\n")
+ self.serialize_xrefs_references(output)
+ output.write(" </references>\n")
+ output.write(" <alpha>\n")
+ self.serialize_xrefs_alpha(output)
+ output.write(" </alpha>\n")
+ output.write(" <constructors>\n")
+ self.serialize_xrefs_constructors(output)
+ output.write(" </constructors>\n")
+ output.write(" <functions>\n")
+ self.serialize_xrefs_functions(output)
+ output.write(" </functions>\n")
+ output.write(" <files>\n")
+ self.serialize_xrefs_files(output)
+ output.write(" </files>\n")
+ output.write(" <index>\n")
+ self.serialize_xrefs_index(output)
+ output.write(" </index>\n")
def serialize(self):
filename = "%s/%s-api.xml" % (self.path, self.name)
@@ -2124,10 +2124,10 @@ def rebuild():
print "Rebuilding API description for libvirt"
builder = docBuilder("libvirt", srcdir,
["src", "src/util", "include/libvirt"],
- [])
+ [])
else:
print "rebuild() failed, unable to guess the module"
- return None
+ return None
builder.scan()
builder.analyze()
builder.serialize()
@@ -2146,4 +2146,4 @@ if __name__ == "__main__":
debug = 1
parse(sys.argv[1])
else:
- rebuild()
+ rebuild()
--
1.7.4.1
A new release of the Perl bindings, Sys::Virt, is now available from:
http://search.cpan.org/~danberr/Sys-Virt-0.2.6/
Direct download link:
http://search.cpan.org/CPAN/authors/id/D/DA/DANBERR/Sys-Virt-0.2.6.tar.gz
This supports all the APIs up to and including libvirt 0.8.7, with the
exception of the stream APIs.
Regards,
Daniel
--
|: http://berrange.com -o- http://www.flickr.com/photos/dberrange/ :|
|: http://libvirt.org -o- http://virt-manager.org :|
|: http://autobuild.org -o- http://search.cpan.org/~danberr/ :|
|: http://entangle-photo.org -o- http://live.gnome.org/gtk-vnc :|
[libvirt] [BUG] storage_backend_fs: failing virStorageBackendProbeTarget() disables whole pool
by Philipp Hahn 17 Feb '11
Hello,
I have a problem with the following code fragment, which refreshes a
directory-based storage pool:
storage_backend_fs.c#virStorageBackendFileSystemRefresh(...)
    ...
    while ((ent = readdir(dir)) != NULL) {
        ...
        if ((ret = virStorageBackendProbeTarget(...)) < 0) {
            if (ret == -1)
                goto cleanup;
            ...
        }
        ...
    }
    closedir(dir);
    ...
    return 0;
cleanup:
    ...
    return -1;
}
This disables the whole pool if it contains a Qcow2 volume whose backing
file is (currently) unavailable (because, for example, the central NFS
store is unreachable or the permissions don't allow reading). This is very
annoying, since it's hard to find the Qcow2 volume that breaks the pool.
I use the following command to find broken volumes:
find "$dir" -maxdepth 1 \( -type f -o -type l \) \( -exec kvm-img info {}
\; -o -print \) 2>/dev/null
Even worse, you can't delete the broken volume or re-create the missing one
with virsh alone.
To reproduce:
dir=$(mktemp -d)
virsh pool-create-as tmp dir '' '' '' '' "$dir"
virsh vol-create-as --format qcow2 tmp back 1G
virsh vol-create-as --format qcow2 --backing-vol-format qcow2 --backing-vol
back tmp cow 1G
virsh vol-delete --pool tmp back
virsh pool-refresh tmp
After the last step, the pool will be gone (because it was not persistent). As
long as the now broken image stays in the directory, you will not be able to
re-create or re-start the pool.
The easiest 'fix' would be to ignore all errors regarding the detection of the
backing file's format. This would at least allow users to still create
new volumes and list existing volumes.
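As a rough illustration of that suggestion, here is a minimal standalone
sketch (not the actual libvirt code; probe_volume() and refresh_pool() are
hypothetical stand-ins for virStorageBackendProbeTarget() and the refresh
loop) where a failed probe only skips the affected volume instead of
aborting the whole pool refresh:

/*
 * Minimal standalone sketch of the idea above, NOT the actual libvirt code.
 * A probe failure skips only the affected volume; the pool stays usable.
 */
#include <dirent.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical probe: pretend volumes with "broken" in the name have an
 * unreadable backing file; the real code would inspect the image header. */
static int probe_volume(const char *name)
{
    return strstr(name, "broken") ? -1 : 0;
}

static int refresh_pool(const char *dir_path)
{
    DIR *dir = opendir(dir_path);
    struct dirent *ent;

    if (!dir)
        return -1;                /* only a missing pool directory is fatal */

    while ((ent = readdir(dir)) != NULL) {
        if (ent->d_name[0] == '.')
            continue;             /* skip ".", ".." and hidden entries */
        if (probe_volume(ent->d_name) < 0) {
            /* Skip the unreadable volume but keep refreshing the rest. */
            fprintf(stderr, "ignoring broken volume %s\n", ent->d_name);
            continue;
        }
        printf("volume %s added to pool\n", ent->d_name);
    }

    closedir(dir);
    return 0;
}

int main(void)
{
    return refresh_pool(".") < 0 ? 1 : 0;
}

The real fix would of course also want to record why a volume was skipped,
but the control flow above is the gist of the suggestion.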
I appreciate any comments.
BYtE
Philipp
--
Philipp Hahn Open Source Software Engineer hahn(a)univention.de
Univention GmbH Linux for Your Business fon: +49 421 22 232- 0
Mary-Somerville-Str.1 28359 Bremen fax: +49 421 22 232-99
http://www.univention.de/
** Besuchen Sie uns auf der CeBIT in Hannover **
** Auf dem Univention Stand D36 in Halle 2 **
** Vom 01. bis 05. März 2011 **
If we don't check the return value of qemuMonitorDriveDel, virsh users will
get "Disk detached successfully" even when the call fails.
---
src/qemu/qemu_hotplug.c | 10 ++++++++--
src/qemu/qemu_monitor_json.c | 2 +-
2 files changed, 9 insertions(+), 3 deletions(-)
diff --git a/src/qemu/qemu_hotplug.c b/src/qemu/qemu_hotplug.c
index fb9db5a..70e9d8e 100644
--- a/src/qemu/qemu_hotplug.c
+++ b/src/qemu/qemu_hotplug.c
@@ -1187,7 +1187,10 @@ int qemuDomainDetachPciDiskDevice(struct qemud_driver *driver,
}
/* disconnect guest from host device */
- qemuMonitorDriveDel(priv->mon, drivestr);
+ if (qemuMonitorDriveDel(priv->mon, drivestr) != 0) {
+ qemuDomainObjExitMonitor(vm);
+ goto cleanup;
+ }
qemuDomainObjExitMonitorWithDriver(driver, vm);
@@ -1269,7 +1272,10 @@ int qemuDomainDetachSCSIDiskDevice(struct qemud_driver *driver,
}
/* disconnect guest from host device */
- qemuMonitorDriveDel(priv->mon, drivestr);
+ if (qemuMonitorDriveDel(priv->mon, drivestr) != 0) {
+ qemuDomainObjExitMonitor(vm);
+ goto cleanup;
+ }
qemuDomainObjExitMonitorWithDriver(driver, vm);
diff --git a/src/qemu/qemu_monitor_json.c b/src/qemu/qemu_monitor_json.c
index d5e8d37..b088405 100644
--- a/src/qemu/qemu_monitor_json.c
+++ b/src/qemu/qemu_monitor_json.c
@@ -2338,7 +2338,7 @@ int qemuMonitorJSONDriveDel(qemuMonitorPtr mon,
if (ret == 0) {
/* See if drive_del isn't supported */
if (qemuMonitorJSONHasError(reply, "CommandNotFound")) {
- VIR_ERROR0(_("deleting disk is not supported. "
+ qemuReportError(VIR_ERR_NO_SUPPORT, _("deleting disk is not supported. "
"This may leak data if disk is reassigned"));
ret = 1;
goto cleanup;
--
1.7.3.1
Hi,
I'd like to let you know about the current status of this project. There is
a wiki page, http://wiki.libvirt.org/page/Libvirt-snmp, which currently serves
as the documentation.
I am currently waiting for an ACK for the patch I've sent (SNMP trap support) -
so if anyone's interested and has a lot of free time ... :)
I'd also like to ask for a hint. Those of you who have tried libvirt-snmp
might already have noticed that snmpwalk displays an ugly domain UUID:
http://wiki.libvirt.org/page/Libvirt-snmp#Examples_of_use
Does anybody know how to get rid of it?
17 Feb '11
The naming convention for device mapper disks is different, and 'parted'
can't be used to delete a device mapper disk partition, e.g.
Name Path
-----------------------------------------
3600a0b80005ad1d7000093604cae912fp1 /dev/mapper/3600a0b80005ad1d7000093604cae912fp1
Error: Expecting a partition number.
This patch introduces 'dmsetup' to fix it.
Changes:
- New function "virIsDevMapperDevice" in "src/utils/utils.c"
- remove "is_dm_device" in "src/storage/parthelper.c", use
"virIsDevMapperDevice" instead.
- Requires "device-mapper" for 'with-storage-disk" in "libvirt.spec.in"
- Check "dmsetup" in 'configure.ac' for "with-storage-disk"
- Changes on "src/Makefile.am" to link against libdevmapper
- New entry for "virIsDevMapperDevice" in "src/libvirt_private.syms"
Changes from v1 to v3:
- s/virIsDeviceMapperDevice/virIsDevMapperDevice/g
- replace "virRun" with "virCommand"
- sort the list of util functions in "libvirt_private.syms"
- ATTRIBUTE_NONNULL(1) for virIsDevMapperDevice declaration.
e.g.
Name Path
-----------------------------------------
3600a0b80005ad1d7000093604cae912fp1 /dev/mapper/3600a0b80005ad1d7000093604cae912fp1
Vol /dev/mapper/3600a0b80005ad1d7000093604cae912fp1 deleted
Name Path
-----------------------------------------
---
configure.ac | 28 +++++++++++++++------
libvirt.spec.in | 1 +
src/Makefile.am | 8 +++---
src/libvirt_private.syms | 2 +-
src/storage/parthelper.c | 14 +---------
src/storage/storage_backend_disk.c | 48 +++++++++++++++++++++--------------
src/util/util.c | 15 +++++++++++
src/util/util.h | 2 +
8 files changed, 73 insertions(+), 45 deletions(-)
diff --git a/configure.ac b/configure.ac
index 3cd824a..44a3b19 100644
--- a/configure.ac
+++ b/configure.ac
@@ -1725,12 +1725,19 @@ LIBPARTED_LIBS=
if test "$with_storage_disk" = "yes" ||
test "$with_storage_disk" = "check"; then
AC_PATH_PROG([PARTED], [parted], [], [$PATH:/sbin:/usr/sbin])
+ AC_PATH_PROG([DMSETUP], [dmsetup], [], [$PATH:/sbin:/usr/sbin])
if test -z "$PARTED" ; then
PARTED_FOUND=no
else
PARTED_FOUND=yes
fi
+ if test -z "$DMSETUP" ; then
+ DMSETUP_FOUND=no
+ else
+ DMSETUP_FOUND=yes
+ fi
+
if test "$PARTED_FOUND" = "yes" && test "x$PKG_CONFIG" != "x" ; then
PKG_CHECK_MODULES([LIBPARTED], [libparted >= $PARTED_REQUIRED], [],
[PARTED_FOUND=no])
@@ -1748,14 +1755,17 @@ if test "$with_storage_disk" = "yes" ||
CFLAGS="$save_CFLAGS"
fi
- if test "$PARTED_FOUND" = "no" ; then
- if test "$with_storage_disk" = "yes" ; then
- AC_MSG_ERROR([We need parted for disk storage driver])
- else
- with_storage_disk=no
- fi
- else
- with_storage_disk=yes
+ if test "$with_storage_disk" = "yes" &&
+ test "$PARTED_FOUND:$DMSETUP_FOUND" != "yes:yes"; then
+ AC_MSG_ERROR([Need both parted and dmsetup for disk storage driver])
+ fi
+
+ if test "$with_storage_disk" = "check"; then
+ if test "$PARTED_FOUND:$DMSETUP_FOUND" != "yes:yes"; then
+ with_storage_disk=no
+ else
+ with_storage_disk=yes
+ fi
fi
if test "$with_storage_disk" = "yes"; then
@@ -1763,6 +1773,8 @@ if test "$with_storage_disk" = "yes" ||
[whether Disk backend for storage driver is enabled])
AC_DEFINE_UNQUOTED([PARTED],["$PARTED"],
[Location or name of the parted program])
+ AC_DEFINE_UNQUOTED([DMSETUP],["$DMSETUP"],
+ [Location or name of the dmsetup program])
fi
fi
AM_CONDITIONAL([WITH_STORAGE_DISK], [test "$with_storage_disk" = "yes"])
diff --git a/libvirt.spec.in b/libvirt.spec.in
index 5021f45..8cb8fad 100644
--- a/libvirt.spec.in
+++ b/libvirt.spec.in
@@ -279,6 +279,7 @@ Requires: iscsi-initiator-utils
%if %{with_storage_disk}
# For disk driver
Requires: parted
+Requires: device-mapper
%endif
%if %{with_storage_mpath}
# For multipath support
diff --git a/src/Makefile.am b/src/Makefile.am
index 2f94efd..6e14043 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -437,9 +437,9 @@ libvirt_la_BUILT_LIBADD = libvirt_util.la
libvirt_util_la_SOURCES = \
$(UTIL_SOURCES)
libvirt_util_la_CFLAGS = $(CAPNG_CFLAGS) $(YAJL_CFLAGS) $(LIBNL_CFLAGS) \
- $(AM_CFLAGS) $(AUDIT_CFLAGS)
+ $(AM_CFLAGS) $(AUDIT_CFLAGS) $(DEVMAPPER_CFLAGS)
libvirt_util_la_LIBADD = $(CAPNG_LIBS) $(YAJL_LIBS) $(LIBNL_LIBS) \
- $(LIB_PTHREAD) $(AUDIT_LIBS)
+ $(LIB_PTHREAD) $(AUDIT_LIBS) $(DEVMAPPER_LIBS)
noinst_LTLIBRARIES += libvirt_conf.la
@@ -1154,7 +1154,6 @@ libvirt_parthelper_SOURCES = $(STORAGE_HELPER_DISK_SOURCES)
libvirt_parthelper_LDFLAGS = $(WARN_LDFLAGS) $(AM_LDFLAGS)
libvirt_parthelper_LDADD = \
$(LIBPARTED_LIBS) \
- $(DEVMAPPER_LIBS) \
libvirt_util.la \
../gnulib/lib/libgnu.la
@@ -1179,7 +1178,8 @@ libvirt_lxc_SOURCES = \
libvirt_lxc_LDFLAGS = $(WARN_CFLAGS) $(AM_LDFLAGS)
libvirt_lxc_LDADD = $(CAPNG_LIBS) $(YAJL_LIBS) \
$(LIBXML_LIBS) $(NUMACTL_LIBS) $(LIB_PTHREAD) \
- $(LIBNL_LIBS) $(AUDIT_LIBS) ../gnulib/lib/libgnu.la
+ $(LIBNL_LIBS) $(AUDIT_LIBS) $(DEVMAPPER_LIBS) \
+ ../gnulib/lib/libgnu.la
libvirt_lxc_CFLAGS = \
$(LIBPARTED_CFLAGS) \
$(NUMACTL_CFLAGS) \
diff --git a/src/libvirt_private.syms b/src/libvirt_private.syms
index b9e3efe..1ab3da3 100644
--- a/src/libvirt_private.syms
+++ b/src/libvirt_private.syms
@@ -884,6 +884,7 @@ virGetUserID;
virGetUserName;
virHexToBin;
virIndexToDiskName;
+virIsDevMapperDevice;
virKillProcess;
virMacAddrCompare;
virParseMacAddr;
@@ -910,7 +911,6 @@ virStrncpy;
virTimestamp;
virVasprintf;
-
# uuid.h
virGetHostUUID;
virSetHostUUIDStr;
diff --git a/src/storage/parthelper.c b/src/storage/parthelper.c
index 6ef413d..acc9171 100644
--- a/src/storage/parthelper.c
+++ b/src/storage/parthelper.c
@@ -59,18 +59,6 @@ enum diskCommand {
DISK_GEOMETRY
};
-static int
-is_dm_device(const char *devname)
-{
- struct stat buf;
-
- if (devname && !stat(devname, &buf) && dm_is_dm_major(major(buf.st_rdev))) {
- return 1;
- }
-
- return 0;
-}
-
int main(int argc, char **argv)
{
PedDevice *dev;
@@ -96,7 +84,7 @@ int main(int argc, char **argv)
}
path = argv[1];
- if (is_dm_device(path)) {
+ if (virIsDevMapperDevice(path)) {
partsep = "p";
canonical_path = strdup(path);
if (canonical_path == NULL) {
diff --git a/src/storage/storage_backend_disk.c b/src/storage/storage_backend_disk.c
index c7ade6b..70e3ba6 100644
--- a/src/storage/storage_backend_disk.c
+++ b/src/storage/storage_backend_disk.c
@@ -31,6 +31,7 @@
#include "storage_backend_disk.h"
#include "util.h"
#include "memory.h"
+#include "command.h"
#include "configmake.h"
#define VIR_FROM_THIS VIR_FROM_STORAGE
@@ -647,6 +648,8 @@ virStorageBackendDiskDeleteVol(virConnectPtr conn ATTRIBUTE_UNUSED,
char *part_num = NULL;
char *devpath = NULL;
char *devname, *srcname;
+ virCommandPtr cmd = NULL;
+ bool isDevMapperDevice;
int rc = -1;
if (virFileResolveLink(vol->target.path, &devpath) < 0) {
@@ -660,36 +663,43 @@ virStorageBackendDiskDeleteVol(virConnectPtr conn ATTRIBUTE_UNUSED,
srcname = basename(pool->def->source.devices[0].path);
DEBUG("devname=%s, srcname=%s", devname, srcname);
- if (!STRPREFIX(devname, srcname)) {
+ isDevMapperDevice = virIsDevMapperDevice(devpath);
+
+ if (!isDevMapperDevice && !STRPREFIX(devname, srcname)) {
virStorageReportError(VIR_ERR_INTERNAL_ERROR,
_("Volume path '%s' did not start with parent "
"pool source device name."), devname);
goto cleanup;
}
- part_num = devname + strlen(srcname);
+ if (!isDevMapperDevice) {
+ part_num = devname + strlen(srcname);
- if (*part_num == 0) {
- virStorageReportError(VIR_ERR_INTERNAL_ERROR,
- _("cannot parse partition number from target "
- "'%s'"), devname);
- goto cleanup;
- }
+ if (*part_num == 0) {
+ virStorageReportError(VIR_ERR_INTERNAL_ERROR,
+ _("cannot parse partition number from target "
+ "'%s'"), devname);
+ goto cleanup;
+ }
- /* eg parted /dev/sda rm 2 */
- const char *prog[] = {
- PARTED,
- pool->def->source.devices[0].path,
- "rm",
- "--script",
- part_num,
- NULL,
- };
+ cmd = virCommandNewArgList(PARTED,
+ pool->def->source.devices[0].path,
+ "rm",
+ "--script",
+ part_num,
+ NULL);
- if (virRun(prog, NULL) < 0)
- goto cleanup;
+ if (virCommandRun(cmd, NULL) < 0)
+ goto cleanup;
+ } else {
+ cmd = virCommandNewArgList(DMSETUP, "remove", "--force", devpath, NULL);
+
+ if (virCommandRun(cmd, NULL) < 0)
+ goto cleanup;
+ }
rc = 0;
cleanup:
VIR_FREE(devpath);
+ virCommandFree(cmd);
return rc;
diff --git a/src/util/util.c b/src/util/util.c
index 6161be6..1ec61b8 100644
--- a/src/util/util.c
+++ b/src/util/util.c
@@ -45,6 +45,7 @@
#include <string.h>
#include <signal.h>
#include <termios.h>
+#include <libdevmapper.h>
#include "c-ctype.h"
#ifdef HAVE_PATHS_H
@@ -3123,3 +3124,17 @@ virTimestamp(void)
return timestamp;
}
+
+bool
+virIsDevMapperDevice(const char *devname)
+{
+ struct stat buf;
+
+ if (!stat(devname, &buf) &&
+ S_ISBLK(buf.st_mode) &&
+ dm_is_dm_major(major(buf.st_rdev)))
+ return true;
+
+ return false;
+}
+
diff --git a/src/util/util.h b/src/util/util.h
index 8373038..10d3253 100644
--- a/src/util/util.h
+++ b/src/util/util.h
@@ -296,4 +296,6 @@ int virBuildPathInternal(char **path, ...) ATTRIBUTE_SENTINEL;
char *virTimestamp(void);
+bool virIsDevMapperDevice(const char *devname) ATTRIBUTE_NONNULL(1);
+
#endif /* __VIR_UTIL_H__ */
--
1.7.4
Attempting to set autostart on a transient domain results in
ERR_OPERATION_INVALID, not ERR_INTERNAL_ERROR.
---
scripts/domain/051-transient-autostart.t | 2 +-
1 files changed, 1 insertions(+), 1 deletions(-)
diff --git a/scripts/domain/051-transient-autostart.t b/scripts/domain/051-transient-autostart.t
index 3e5ea61..26ca82b 100644
--- a/scripts/domain/051-transient-autostart.t
+++ b/scripts/domain/051-transient-autostart.t
@@ -48,7 +48,7 @@ my $auto = $dom->get_autostart();
ok(!$auto, "autostart is disabled for transient VMs");
-ok_error(sub { $dom->set_autostart(1) }, "Set autostart not supported on transient VMs", Sys::Virt::Error::ERR_INTERNAL_ERROR);
+ok_error(sub { $dom->set_autostart(1) }, "Set autostart not supported on transient VMs", Sys::Virt::Error::ERR_OPERATION_INVALID);
diag "Destroying the transient domain";
$dom->destroy;
--
1.7.3.1
17 Feb '11
* src/qemu/qemu_driver.c
---
src/qemu/qemu_driver.c | 6 ++++++
1 files changed, 6 insertions(+), 0 deletions(-)
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index c581cfe..8570e67 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -4647,6 +4647,12 @@ static int qemudDomainSave(virDomainPtr dom, const char *path)
goto cleanup;
}
+ if (!virDomainObjIsActive(vm)) {
+ qemuReportError(VIR_ERR_OPERATION_INVALID,
+ "%s", _("domain is not running"));
+ goto cleanup;
+ }
+
ret = qemudDomainSaveFlag(driver, dom, vm, path, compressed);
cleanup:
--
1.7.4
[libvirt] [PATCH 1/2] build-sys: fix build when daemon is disabled by not installing libvirtd.8
by Diego Elio Pettenò 16 Feb '11
Since the rule to build libvirtd.8 is within the WITH_LIBVIRTD conditional,
declare the man page there as well. Without this change, a build
without the daemon will fail.
---
.gnulib | 2 +-
daemon/Makefile.am | 4 ++--
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/.gnulib b/.gnulib
index 1629006..11fbc57 160000
--- a/.gnulib
+++ b/.gnulib
@@ -1 +1 @@
-Subproject commit 1629006348e1f66f07ce3ddcf3ebd2d14556cfce
+Subproject commit 11fbc57405a118e6ec9a3ebc19bbf5ececdae4d6
diff --git a/daemon/Makefile.am b/daemon/Makefile.am
index 963d64f..dbf0ac3 100644
--- a/daemon/Makefile.am
+++ b/daemon/Makefile.am
@@ -41,12 +41,12 @@ EXTRA_DIST = \
$(AVAHI_SOURCES) \
$(DAEMON_SOURCES)
-man_MANS = libvirtd.8
-
BUILT_SOURCES =
if WITH_LIBVIRTD
+man_MANS = libvirtd.8
+
sbin_PROGRAMS = libvirtd
confdir = $(sysconfdir)/libvirt/
--
1.7.2
Libxml2-Logo-90x34.gif was removed from the repository in Sep 2009
(commit d6d528c) because our docs no longer reference it.
* docs/Makefile.am (install-data-local): Don't install missing file.
---
Pushing this under the trivial rule.
docs/Makefile.am | 1 -
1 files changed, 0 insertions(+), 1 deletions(-)
diff --git a/docs/Makefile.am b/docs/Makefile.am
index 473bbbf..d0fe918 100644
--- a/docs/Makefile.am
+++ b/docs/Makefile.am
@@ -187,7 +187,6 @@ install-data-local:
$(mkinstalldirs) $(DESTDIR)$(HTML_DIR)
for f in $(css) $(dot_html) $(gif) $(png); do \
$(INSTALL) -m 0644 $(srcdir)/$$f $(DESTDIR)$(HTML_DIR); done
- -$(INSTALL) -m 0644 $(srcdir)/Libxml2-Logo-90x34.gif $(DESTDIR)$(HTML_DIR)
$(mkinstalldirs) $(DESTDIR)$(HTML_DIR)/html
for h in $(apihtml); do \
$(INSTALL) -m 0644 $(srcdir)/$$h $(DESTDIR)$(HTML_DIR)/html; done
--
1.7.4