[libvirt] [PATCH 0/2] Refactored XM and SEXPR parsing
by Markus Groß
Hi,
I refactored the XM and SEXPR parsing routines from the xen-unified
driver into a separate directory (xenxs; x for xm and s for sexpr).
This way Xen drivers other than xen-unified, for example the upcoming
XenLight (libxl) driver, can use the parsing functionality.
To use the XM parsing functions, include "xen_xm.h"; for the SEXPR
parsing functions, include "xen_sxpr.h".
The patch is rather big, but most of it is code movement.
Some parsing functions previously required a driver object in order to
fetch the tty path and VNC port. I removed all references to a specific
driver and added extra parameters as a replacement.
The sexpr2xml, xmconfig and xml2sexpr tests have been adapted and pass
without errors.
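For illustration, here is a hypothetical sketch of the kind of interface
change this implies; the actual function names and parameter lists in
xen_sxpr.h may differ:
/* Hypothetical before/after sketch -- not the actual libvirt prototypes.
 *
 * Before: the parser needed the xen-unified driver object so it could
 * look up the tty path and VNC port itself:
 *
 *     xenParseSxpr(xenUnifiedPrivatePtr priv, const struct sexpr *root, ...);
 *
 * After: callers (xen-unified, libxl, ...) pass those values in directly,
 * so the parser no longer depends on any particular driver object: */
virDomainDefPtr xenParseSxpr(const struct sexpr *root,
                             const char *tty,  /* tty path, formerly fetched via the driver */
                             int vncport       /* VNC port, formerly fetched via the driver */
                             /* ... remaining parameters omitted ... */);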
Thanks in advance for your comments on this.
Cheers,
Markus
Markus Groß (2):
Moved Xen SEXPR and XM parsing functionality to a separate directory.
Added myself to AUTHORS to please 'make syntax-check'.
AUTHORS | 1 +
configure.ac | 4 +
include/libvirt/virterror.h | 2 +-
src/Makefile.am | 15 +-
src/util/virterror.c | 4 +-
src/xen/sexpr.c | 568 ----
src/xen/sexpr.h | 55 -
src/xen/xen_driver.c | 19 +-
src/xen/xend_internal.c | 6673 ++++++++++++++-----------------------------
src/xen/xend_internal.h | 30 -
src/xen/xm_internal.c | 1706 +-----------
src/xen/xm_internal.h | 3 -
src/xenxs/sexpr.c | 647 +++++
src/xenxs/sexpr.h | 63 +
src/xenxs/xen_sxpr.c | 2149 ++++++++++++++
src/xenxs/xen_sxpr.h | 62 +
src/xenxs/xen_xm.c | 1716 +++++++++++
src/xenxs/xen_xm.h | 41 +
src/xenxs/xenxs_private.h | 63 +
tests/sexpr2xmltest.c | 12 +-
tests/xmconfigtest.c | 5 +-
tests/xml2sexprtest.c | 4 +-
22 files changed, 7012 insertions(+), 6830 deletions(-)
delete mode 100644 src/xen/sexpr.c
delete mode 100644 src/xen/sexpr.h
create mode 100644 src/xenxs/sexpr.c
create mode 100644 src/xenxs/sexpr.h
create mode 100644 src/xenxs/xen_sxpr.c
create mode 100644 src/xenxs/xen_sxpr.h
create mode 100644 src/xenxs/xen_xm.c
create mode 100644 src/xenxs/xen_xm.h
create mode 100644 src/xenxs/xenxs_private.h
--
1.7.4.1
[libvirt] 0.8.8 and virtio console
by Ruben Kerkhof
Hi all,
This used to work on libvirt-0.8.7:
<console type='pty'>
<target type='virtio'/>
</console>
Using the same configuration with libvirt-0.8.8:
[root@ev004 ~]# virsh create /data/4d5d8a24-bb70-4eff-b1b5-3d8e5bd5c355.xml
error: Failed to create domain from
/data/4d5d8a24-bb70-4eff-b1b5-3d8e5bd5c355.xml
error: internal error no assigned pty for device console0
I'm using qemu-kvm-0.14.0-0.1.201102107aa8c46.
Here's the log from starting a guest with 0.8.7:
LC_ALL=C PATH=/sbin:/usr/sbin:/bin:/usr/bin QEMU_AUDIO_DRV=none
/usr/bin/qemu-kvm -S -M pc-0.14 -cpu
core2duo,+lahf_lm,+popcnt,+sse4.2,+sse4.1,+cx16,-monitor,-vme
-enable-kvm -m 512 -smp 2,sockets=2,cores=1,threads=1 -name
4d5d8a24-bb70-4eff-b1b5-3d8e5bd5c355 -uuid
4d5d8a24-bb70-4eff-b1b5-3d8e5bd5c355 -nodefconfig -nodefaults -chardev
socket,id=monitor,path=/var/lib/libvirt/qemu/4d5d8a24-bb70-4eff-b1b5-3d8e5bd5c355.monitor,server,nowait
-mon chardev=monitor,mode=control -rtc base=utc -boot c -device
virtio-serial-pci,id=virtio-serial0,bus=pci.0,addr=0x5 -drive
file=/dev/vgdata/4d5d8a24-bb70-4eff-b1b5-3d8e5bd5c355-root,if=none,id=drive-virtio-disk0,boot=on,format=raw,cache=none
-device virtio-blk-pci,bus=pci.0,addr=0x6,drive=drive-virtio-disk0,id=virtio-disk0
-drive file=/dev/vgdata/4d5d8a24-bb70-4eff-b1b5-3d8e5bd5c355-swap,if=none,id=drive-virtio-disk1,format=raw,cache=none
-device virtio-blk-pci,bus=pci.0,addr=0x7,drive=drive-virtio-disk1,id=virtio-disk1
-netdev tap,fd=64,id=hostnet0,vhost=on,vhostfd=65 -device
virtio-net-pci,netdev=hostnet0,id=net0,mac=00:16:3e:50:a4:55,bus=pci.0,addr=0x3
-netdev tap,fd=66,id=hostnet1,vhost=on,vhostfd=67 -device
virtio-net-pci,netdev=hostnet1,id=net1,mac=00:16:3e:71:c1:49,bus=pci.0,addr=0x4
-chardev pty,id=console0 -device virtconsole,chardev=console0 -usb
-device usb-tablet,id=input0 -vnc 0.0.0.0:1,password -vga std
char device redirected to /dev/pts/14
And the same guest with 0.8.8:
LC_ALL=C PATH=/sbin:/usr/sbin:/bin:/usr/bin QEMU_AUDIO_DRV=none
/usr/bin/qemu-kvm -S -M pc-0.14 -cpu
core2duo,+lahf_lm,+popcnt,+sse4.2,+sse4.1,+cx16,-monitor,-vme
-enable-kvm -m 512 -smp 2,sockets=2,cores=1,threads=1 -name
4d5d8a24-bb70-4eff-b1b5-3d8e5bd5c355 -uuid
4d5d8a24-bb70-4eff-b1b5-3d8e5bd5c355 -nodefconfig -nodefaults -chardev
socket,id=charmonitor,path=/var/lib/libvirt/qemu/4d5d8a24-bb70-4eff-b1b5-3d8e5bd5c355.monitor,server,nowait
-mon chardev=charmonitor,id=monitor,mode=control -rtc base=utc -boot c
-device virtio-serial-pci,id=virtio-serial0,bus=pci.0,addr=0x5 -drive
file=/dev/vgdata/4d5d8a24-bb70-4eff-b1b5-3d8e5bd5c355-root,if=none,id=drive-virtio-disk0,boot=on,format=raw,cache=none
-device virtio-blk-pci,bus=pci.0,addr=0x6,drive=drive-virtio-disk0,id=virtio-disk0
-drive file=/dev/vgdata/4d5d8a24-bb70-4eff-b1b5-3d8e5bd5c355-swap,if=none,id=drive-virtio-disk1,format=raw,cache=none
-device virtio-blk-pci,bus=pci.0,addr=0x7,drive=drive-virtio-disk1,id=virtio-disk1
-netdev tap,fd=33,id=hostnet0,vhost=on,vhostfd=34 -device
virtio-net-pci,netdev=hostnet0,id=net0,mac=00:16:3e:50:a4:55,bus=pci.0,addr=0x3
-netdev tap,fd=35,id=hostnet1,vhost=on,vhostfd=36 -device
virtio-net-pci,netdev=hostnet1,id=net1,mac=00:16:3e:71:c1:49,bus=pci.0,addr=0x4
-chardev pty,id=charconsole0 -device
virtconsole,chardev=charconsole0,id=console0 -usb -device
usb-tablet,id=input0 -vnc 0.0.0.0:1,password -vga std
char device redirected to /dev/pts/14
Kind regards,
Ruben Kerkhof
[libvirt] [PATCH] check more error info about whether drive_add failed
by Wen Congyang
When we attach a disk but specify a wrong disk image format, the qemu
monitor command drive_add fails, but libvirt does not detect this
error.
Signed-off-by: Wen Congyang <wency(a)cn.fujitsu.com>
---
src/qemu/qemu_monitor_text.c | 6 ++++++
1 files changed, 6 insertions(+), 0 deletions(-)
diff --git a/src/qemu/qemu_monitor_text.c b/src/qemu/qemu_monitor_text.c
index 6d0ba4c..0fd7546 100644
--- a/src/qemu/qemu_monitor_text.c
+++ b/src/qemu/qemu_monitor_text.c
@@ -2453,6 +2453,12 @@ int qemuMonitorTextAddDrive(qemuMonitorPtr mon,
goto cleanup;
}
+ if (strstr(reply, "could not open disk image")) {
+ qemuReportError(VIR_ERR_OPERATION_FAILED, "%s",
+ _("open disk image file failed"));
+ goto cleanup;
+ }
+
ret = 0;
cleanup:
--
1.7.1
[libvirt] [PATCH] cgroup: preserve correct errno on failure
by Eric Blake
* src/util/cgroup.c (virCgroupSetValueStr, virCgroupGetValueStr)
(virCgroupRemoveRecursively): VIR_DEBUG can clobber errno.
(virCgroupRemove): Use VIR_DEBUG rather than DEBUG.
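As a side note, here is a minimal standalone sketch (hypothetical code, not
part of this patch and not libvirt code) of the errno-clobbering pitfall it
addresses: any call made between the failing function and the point where
errno is read, including a logging call, may overwrite errno, so the value
has to be captured first.
#include <errno.h>
#include <stdio.h>
#include <string.h>
/* Hypothetical example: save errno *before* logging, because the logging
 * call itself may change it. */
static int write_value(const char *path, const char *value)
{
    FILE *fp = fopen(path, "w");
    if (!fp) {
        int saved = -errno;                  /* capture errno first ...        */
        fprintf(stderr, "Failed to write '%s': %s\n",
                value, strerror(errno));     /* ... this call might clobber it */
        return saved;
    }
    fputs(value, fp);
    fclose(fp);
    return 0;
}
int main(void)
{
    return write_value("/nonexistent/dir/key", "42") < 0 ? 1 : 0;
}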
---
src/util/cgroup.c | 12 ++++++------
1 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/src/util/cgroup.c b/src/util/cgroup.c
index 47c4633..b71eef9 100644
--- a/src/util/cgroup.c
+++ b/src/util/cgroup.c
@@ -290,8 +290,8 @@ static int virCgroupSetValueStr(virCgroupPtr group,
VIR_DEBUG("Set value '%s' to '%s'", keypath, value);
rc = virFileWriteStr(keypath, value, 0);
if (rc < 0) {
- DEBUG("Failed to write value '%s': %m", value);
rc = -errno;
+ VIR_DEBUG("Failed to write value '%s': %m", value);
} else {
rc = 0;
}
@@ -313,7 +313,7 @@ static int virCgroupGetValueStr(virCgroupPtr group,
rc = virCgroupPathOfController(group, controller, key, &keypath);
if (rc != 0) {
- DEBUG("No path of %s, %s", group->path, key);
+ VIR_DEBUG("No path of %s, %s", group->path, key);
return rc;
}
@@ -321,8 +321,8 @@ static int virCgroupGetValueStr(virCgroupPtr group,
rc = virFileReadAll(keypath, 1024, value);
if (rc < 0) {
- DEBUG("Failed to read %s: %m\n", keypath);
rc = -errno;
+ VIR_DEBUG("Failed to read %s: %m\n", keypath);
} else {
/* Terminated with '\n' has sometimes harmful effects to the caller */
char *p = strchr(*value, '\n');
@@ -635,8 +635,8 @@ static int virCgroupRemoveRecursively(char *grppath)
if (grpdir == NULL) {
if (errno == ENOENT)
return 0;
- VIR_ERROR(_("Unable to open %s (%d)"), grppath, errno);
rc = -errno;
+ VIR_ERROR(_("Unable to open %s (%d)"), grppath, errno);
return rc;
}
@@ -665,7 +665,7 @@ static int virCgroupRemoveRecursively(char *grppath)
}
closedir(grpdir);
- DEBUG("Removing cgroup %s", grppath);
+ VIR_DEBUG("Removing cgroup %s", grppath);
if (rmdir(grppath) != 0 && errno != ENOENT) {
rc = -errno;
VIR_ERROR(_("Unable to remove %s (%d)"), grppath, errno);
@@ -710,7 +710,7 @@ int virCgroupRemove(virCgroupPtr group)
&grppath) != 0)
continue;
- DEBUG("Removing cgroup %s and all child cgroups", grppath);
+ VIR_DEBUG("Removing cgroup %s and all child cgroups", grppath);
rc = virCgroupRemoveRecursively(grppath);
VIR_FREE(grppath);
}
--
1.7.4
[libvirt] Release of libvirt-0.8.8
by Daniel Veillard
As scheduled, the release is out and available from the site:
ftp://libvirt.org/libvirt/
Thanks everybody for the earlier testing on the release candidate
series, which I think was quite useful!
This release again mostly consists of a very large batch of small
improvements and bug fixes, plus a few new features:
Features:
- sysinfo: expose new API (Eric Blake)
- cgroup blkio weight support. (Gui Jianfeng)
- smartcard device support (Eric Blake)
- qemu: Support per-device boot ordering (Jiri Denemark)
Documentation:
- docs: fix typos (Eric Blake)
- docs: added link for nimbus to apps page (Justin Clift)
- Update src/README (Matthias Bolte)
- docs: Add information about libvirt-php new location (Michal Novotny)
- Add libvirt-php information page (Michal Novotny)
- cgroup: Add documentation for blkiotune elements. (Gui Jianfeng)
- docs/index.html.in: update KVM url (Niels de Vos)
- docs/index.html.in: update QEMU url (Alon Levy)
- docs: more on qemu locking patterns (Eric Blake)
- docs: renamed hudson project link to jenkins, matching project rename (Justin Clift)
- docs: Update docs for cpu_shares setting (Osier Yang)
- docs: replace CRLF with LF (Juerg Haefliger)
- docs: Add docs for new extra parameter pkipath (Osier Yang)
- docs: expand the man page text for virsh setmaxmem (Justin Clift)
- docs: fix incorrect XML element mentioned by setmem text (Justin Clift)
- docs: add a link to the bindings page under the downloads menu item (Justin Clift)
- docs: document <controller> element (Eric Blake)
- docs: move the apps page to the top level as its good promo (Justin Clift)
- docs: added new entries to apps page, plus adjusted a few existing (Justin Clift)
- docs: document <sysinfo> and <smbios> elements (Eric Blake)
- datatypes: Fix outdated function names in the documentation (Matthias Bolte)
- Add documentation for VIR_DOMAIN_MEMORY_PARAM_UNLIMITED (Matthias Bolte)
- docs: Move the "Network Filtering" page one level up in the hierarchy (Matthias Bolte)
- docs: add buildbot to the apps page (Justin Clift)
- docs: add new conversion heading to the apps listing (Justin Clift)
- docs: updated windows page for new 0.8.7 installer (Justin Clift)
- docs: clarify virsh setvcpus and setmem usage with active domains (Justin Clift)
- Document HAP domain feature (Jim Fehlig)
- docs: fix trivial typos in currentMemory description (Justin Clift)
- doc: improve the documentation of desturi (Wen Congyang)
- docs: reorder apps page alphabetically, plus add libguestfs entries (Justin Clift)
- docs: add entry for archipel to the apps page (Justin Clift)
- docs: use xml entity encoding for extended character last name (Justin Clift)
- docs: updated memtune info again in virsh command reference (Justin Clift)
- docs: updated release of virsh cmd reference, with memtune info (Justin Clift)
- maint: document dislike of mismatched if/else bracing (Eric Blake)
- docs: added libvirt-announce to contact page (Justin Clift)
Portability:
- qemu: ignore failure of qemu -M ? on older qemu (Eric Blake)
- virsh: avoid mingw compiler warnings (Eric Blake)
- build: avoid problems with autogen.sh runs from tarball (Eric Blake)
- build: fix cygwin strerror_r failure (Eric Blake)
- Avoid pthread_sigmask on Win32 platforms (Daniel P. Berrange)
- Fix compilation when building without sasl (Daniel Veillard)
- build: fix parted detection at configure time (Eric Blake)
- Fix setup of lib directory with autogen.sh --system (Daniel P. Berrange)
- build: fix 'make check' with older git (Eric Blake)
- maint: support --no-git option during autogen.sh (Eric Blake)
- libvirt-guests: remove bashisms (Laurent Léonard)
- build: restore mingw build (Eric Blake)
- commandtest: avoid printing loader-control variables from commandhelper (Diego Elio Pettenò)
Bug Fixes:
- cgroup: preserve correct errno on failure (Eric Blake)
- qemu: Fix command line generation with faked host CPU (Jiri Denemark)
- tests: Fake host capabilities properly (Jiri Denemark)
- build: address clang reports about virCommand (Eric Blake)
- qemu: don't mask real error with oom report (Eric Blake)
- qemu: avoid NULL derefs (Eric Blake)
- virDomainMemoryStats: avoid null dereference (Eric Blake)
- Fix leak of mutex attributes in POSIX threads impl (Daniel P. Berrange)
- Fix leak in SCSI storage backend (Daniel P. Berrange)
- storage: Create enough volumes for mpath pool (Osier Yang)
- qemu: avoid NULL deref on error (Eric Blake)
- conf: Fix XML generation for smartcards (Jiri Denemark)
- Fix cleanup on VM state after failed QEMU startup (Daniel P. Berrange)
- libvirt-qemu: Fix enum type declaration (Jiri Denemark)
- xen: Prevent updating device when attaching a device (Osier Yang)
- qemu: Fix escape_monitor(escape_shell(command)) (Philipp Hahn)
- qemu: fix attach-interface regression (Wen Congyang)
- Fix typo in parsing of spice 'auth' data (Michal Privoznik)
- Reset logging filter function when forking (Daniel P. Berrange)
- Block SIGPIPE around virExec hook functions (Daniel P. Berrange)
- Only initialize/cleanup libpciaccess once (Daniel P. Berrange)
- macvtap: fix 2 nla_put expressions (non-serious bug) (Stefan Berger)
- qemu: avoid double shutdown (Eric Blake)
- Fix conflicts with glibc globals (Davidlohr Bueso)
- qemuBuildDeviceAddressStr() checks for QEMUD_CMD_FLAG_PCI_MULTIBUS (Niels de Vos)
- Don't sleep in poll() if there is existing SASL decoded data (Daniel P. Berrange)
- Initialization error of controller in QEmu SCSI hotplug (Wen Congyang)
- esx: Ensure max-memory has 4 megabyte granularity (Matthias Bolte)
- Remove double close of qemu monitor (Daniel P. Berrange)
- Prevent overfilling of self-pipe in python event loop (Daniel P. Berrange)
- avoid vm to be deleted if qemuConnectMonitor failed (Wen Congyang)
- tests: Fix virtio channel tests (Jiri Denemark)
- event: fix event-handling allocation crash (Eric Blake)
- storage: Round up capacity for LVM volume creation (Osier Yang)
- Do not use virtio-serial port 0 for generic ports (David Allan)
- Manually kill gzip if restore fails before starting qemu (Laine Stump)
- Set SELinux context label of pipes used for qemu migration (Laine Stump)
- virsh: require --mac to avoid detach-interface ambiguity (Michal Privoznik)
- dispatch error before return (Wen Congyang)
- event: fix event-handling data race (Eric Blake)
- qemu: Retry JSON monitor cont cmd on MigrationExpected error (Jim Fehlig)
- Fix startup with VNC password expiry on old QEMU (Daniel P. Berrange)
- Fix error reporting when machine type probe fails (Daniel P. Berrange)
- Avoid crash in security driver if model is NULL (Daniel P. Berrange)
- qemu: Fix a possible deadlock in p2p migration (Wen Congyang)
- qemu: Avoid sending STOPPED event twice (Jiri Denemark)
- spec: Start libvirt-guests only if it's on in current runlevel (Jiri Denemark)
- Increase size of driver table to make UML work again (Daniel P. Berrange)
- qemu: don't fail capabilities check on 0.12.x (Eric Blake)
- Fix 'make check' after commit 04197350 (Jim Fehlig)
- esx: Fix memory leak in HostSystem managed object free function (Matthias Bolte)
- qemu: Watchdog IB700 is not a PCI device (RHBZ#667091). (Richard W.M. Jones)
- cpu: plug memory leak (Eric Blake)
- network: plug memory leak (Eric Blake)
- network: plug unininitialized read found by valgrind (Eric Blake)
- remote: Don't lose track of events when callbacks are slow (Cole Robinson)
- conf: Report error if invalid type specified for character device (Osier Yang)
- daemon: Fix core dumps if unix_sock_group is set (Jiri Denemark)
- vbox: Use correct VRAM size unit (Matthias Bolte)
- bridge: Fix generation of dnsmasq's --dhcp-hostsfile option (Kay Schubert)
- qemu: Fix bogus warning about uninitialized saveptr (Jiri Denemark)
- Don't chown qemu saved image back to root after save if dynamic_ownership=0 (Laine Stump)
Improvements:
- maint: delete unused 'make install' step (Eric Blake)
- Update czech localization (Zdenek Styblik)
- Avoid empty strings when --with-packager(-version) is not specified (Matthias Bolte)
- Output commandline on status != 0 in virCommandWait (Matthias Bolte)
- add missing error handling to virGetDomain (Christophe Fergeau)
- call virReportOOMError when appropriate in hash.c (Christophe Fergeau)
- xml: avoid compiler warning (Eric Blake)
- nwfilter: reorder match extensions relative to state match (Stefan Berger)
- fix OOM handling in hash routines (Christophe Fergeau)
- docs: Distribute XSLT files to generate HACKING (Matthias Bolte)
- qemu: Report a more informative error for missing cgroup controllers (Matthias Bolte)
- Imprint all logs with version + package build information (Daniel P. Berrange)
- Reduce log level when cgroups aren't mounted (Daniel P. Berrange)
- Avoid warnings from nwfilter driver when run non-root (Daniel P. Berrange)
- build: distribute 'make syntax-check' tweaks (Eric Blake)
- Adjust some log levels in udev driver (Daniel P. Berrange)
- Add check for binary existing in machine type probe (Daniel P. Berrange)
- Add a little more debugging for async events (Daniel P. Berrange)
- Move connection driver modules directory (Daniel P. Berrange)
- Support SCSI RAID type & lower log level for unknown types (Daniel P. Berrange)
- Don't use CLONE_NEWUSER for now (Serge E. Hallyn)
- sysinfo: implement qemu support (Eric Blake)
- sysinfo: refactor xml formatting (Eric Blake)
- sysinfo: implement virsh support (Eric Blake)
- sysinfo: implement the remote protocol (Eric Blake)
- sysinfo: implement the public API (Eric Blake)
- sysinfo: define internal driver API (Eric Blake)
- LXC: LXC Blkio weight configuration support. (Gui Jianfeng)
- qemu: Implement blkio tunable XML configuration and parsing. (Gui Jianfeng)
- cgroup: Update XML Schema for new entries. (Gui Jianfeng)
- cgroup: Implement blkio.weight tuning API. (Gui Jianfeng)
- cgroup: Enable cgroup hierarchy for blkio cgroup (Gui Jianfeng)
- Update Dutch and Polish localizations (Daniel Veillard)
- Vietnamese translations for libvirt (Hero Phương)
- spicevmc: support older -device spicevmc of qemu 0.13.0 (Eric Blake)
- smartcard: add spicevmc support (Eric Blake)
- spicevmc: support new qemu chardev (Daniel P. Berrange)
- smartcard: turn on qemu support (Eric Blake)
- smartcard: enable SELinux support (Eric Blake)
- smartcard: check for qemu capability (Eric Blake)
- smartcard: add domain conf support (Eric Blake)
- smartcard: add XML support for <smartcard> device (Eric Blake)
- qemu: Support booting from hostdev PCI devices (Jiri Denemark)
- Support booting from hostdev devices (Jiri Denemark)
- qemu: Add shortcut for HMP pass through (Jiri Denemark)
- macvtap: fix variable in debugging output (Stefan Berger)
- qemu: Build command line for incoming tunneled migration (Osier Yang)
- bridge_driver: handle DNS over IPv6 (Paweł Krześniak)
- tests: handle backspace-newline pairs in test input files (Juerg Haefliger)
- qemu: More clear error parsing domain def failure of tunneled migration (Osier Yang)
- maint: reject raw close, popen in 'make syntax-check' (Eric Blake)
- build: avoid close, system (Eric Blake)
- Add VIR_DIV_UP to divide memory or storage request sizes with round up (Matthias Bolte)
- qemu: fix augeas support for vnc_auto_unix_socket (Eric Blake)
- virsh: added --all flag to freecell command (Michal Privoznik)
- esx: Don't try to change max-memory of an active domain (Matthias Bolte)
- qemu aio: enable support (Eric Blake)
- qemu aio: parse aio support from qemu -help (Matthias Dahl)
- qemu aio: add XML parsing (Matthias Dahl)
- Remove bogus log warning lines when launching QEMU (Daniel P. Berrange)
- qemu: fix error messages (Eric Blake)
- qemu: Report more accurate error on failure to attach device. (Hu Tao)
- Force guest suspend at timeout (Wen Congyang)
- Show migration progress. (Wen Congyang)
- Cancel migration if user presses Ctrl-C when migration is in progress (Hu Tao)
- qemu: use separate alias for chardev and associated device (Eric Blake)
- remote: Add extra parameter pkipath for URI (Osier Yang)
- Update localization files from Fedora i10n (Daniel Veillard)
- Add check for poll error events in monitor (Daniel P. Berrange)
- Filter out certain expected error messages from libvirtd (Daniel P. Berrange)
- Add a function to the security driver API that sets the label of an open fd. (Laine Stump)
- qemu: Error prompt when managed save a shutoff domain (Osier Yang)
- build: avoid corrupted gnulib/tests/Makefile (Eric Blake)
- qemu: sound: Support intel 'ich6' model (Cole Robinson)
- vmx: Use VIR_ERR_CONFIG_UNSUPPORTED when appropriated (Matthias Bolte)
- Push unapplied fixups for previous patch (Cole Robinson)
- qemu: Add conf option to auto setup VNC unix sockets (Cole Robinson)
- qemu: Allow serving VNC over a unix domain socket (Cole Robinson)
- qemu: Set domain def transient at beginning of startup process (Cole Robinson)
- qemu: report more proper error for unsupported graphics (Osier Yang)
- qemu: Fail if per-device boot is used but deviceboot is not supported (Jiri Denemark)
- Turn libvirt.c error reporting functions into macros (Daniel P. Berrange)
- build: use more gnulib modules for simpler code (Eric Blake)
- Remove two unused PATH_MAX-sized char arrays from the stack (Matthias Bolte)
- Use VIR_ERR_OPERATION_INVALID when appropriated (Matthias Bolte)
- Fix misuse of VIR_ERR_INVALID_* error code (Matthias Bolte)
- Simplify "NWFilterPool" to "NWFilter" (Matthias Bolte)
- datatypes: Get virSecretFreeName in sync with the other free functions (Matthias Bolte)
- qemu: use -incoming fd:n to avoid qemu holding fd indefinitely (Eric Blake)
- tests: Add tests for per-device boot elements (Jiri Denemark)
- Introduce per-device boot element (Jiri Denemark)
- conf: Move boot parsing into a separate function (Jiri Denemark)
- build: let xgettext see strings in libvirt-guests (Eric Blake)
- A couple of fixes for the search PHP code (Daniel Veillard)
- virsh: Use WITH_SECDRIVER_APPARMOR to detect AppArmor support (Matthias Bolte)
- memtune: Let virsh know the unlimited value for memory tunables (Nikunj A. Dadhania)
- maint: improve sc_prohibit_strncmp syntax check (Eric Blake)
- Enable tuning of qemu network tap device "sndbuf" size (Laine Stump)
- Add XML config switch to enable/disable vhost-net support (Laine Stump)
- Use the new set_password monitor command to set password. (Marc-André Lureau)
- qemu: add set_password and expire_password monitor commands (Marc-André Lureau)
- qemu: move monitor device out of domain_conf common code (Eric Blake)
- domain_conf: split source data out from ChrDef (Eric Blake)
- cpu: Add support for Westmere CPU model (Jiri Denemark)
- qemu: improve device flag parsing (Eric Blake)
- util: add missing string->integer conversion functions (Eric Blake)
- qemu: convert capabilities to use virCommand (Eric Blake)
- virsh: ensure --maximum flag used only with --config for setvcpus (Justin Clift)
- Add HAP to xen hypervisor capabilities (Jim Fehlig)
- Add support for HAP feature to xen drivers (Jim Fehlig)
- Add HAP to virDomainFeature enum (Jim Fehlig)
- tests: virsh is no longer in builddir/src (Eric Blake)
- virFindFileInPath: only find executable non-directory (Eric Blake)
- Fix old PHP syntax in the search online form (Daniel Veillard)
- report error when specifying wrong desturi (Wen Congyang)
- qemu: Reject SDL graphic if it's not supported by qemu (Osier Yang)
- vbox: Silently ignore missing registry key on Windows (Matthias Bolte)
- python: Use PyCapsule API if available (Cole Robinson)
- event-test: Simplify debug on/off (Cole Robinson)
- Refactor the security drivers to simplify usage (Daniel P. Berrange)
- Add AM_MAINTAINER_MODE (Guido Günther)
- esx: Move occurrence check into esxVI_LookupObjectContentByType (Matthias Bolte)
- esx: Add domain autostart support (Matthias Bolte)
- vmx: Add support for video device VRAM size (Matthias Bolte)
- API: Improve log for domain related APIs (Osier Yang)
- schema: tighten <serial><protocol type=...> relaxNG (Eric Blake)
- Log an error on attempts to add a NAT rule for non-IPv4 addresses (Laine Stump)
- Improve error reporting when parsing dhcp info for virtual networks (Laine Stump)
- qemu driver: fix positioning to end of log file (Stefan Berger)
- build: satisfy 'make syntax-check' regarding year change (Eric Blake)
Cleanups:
- build: silence some clang warnings (Eric Blake)
- maint: kill dead assignments (Eric Blake)
- build: silence false positive clang report (Eric Blake)
- maint: whitespace cleanup (Eric Blake)
- maint: update AUTHORS (Eric Blake)
- Prefer C style comments over C++ ones (Matthias Bolte)
- Revert all previous error log priority hacks (Daniel P. Berrange)
- Cleanup code style in logging APIs (Daniel P. Berrange)
- Remove redundant brackets around return values (Daniel P. Berrange)
- tests: Remove obsolete secaatest (Matthias Bolte)
- datatypes: avoid redundant __FUNCTION__ (Eric Blake)
Thanks everybody for the patches, reports and documentation improvements!
Daniel
--
Daniel Veillard | libxml Gnome XML XSLT toolkit http://xmlsoft.org/
daniel(a)veillard.com | Rpmfind RPM search engine http://rpmfind.net/
http://veillard.com/ | virtualization library http://libvirt.org/
[libvirt] [PATCH 0/2] Add tx_alg attribute to interface XML for virtio backend
by Laine Stump
These two patches provide the ability to configure which of two
algorithms is used on the TX side of the virtio-net-pci
device. Details are in the comments for PATCH 2/2.
There are also at least a couple of open questions about the patch,
posed in the comments of 2/2. It's highly unlikely this patch will be
pushed as-is; I'm sending it mostly to solicit comments on those
questions.
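For readers who have not seen 2/2 yet, here is a hypothetical sketch of
roughly where such an attribute would sit in the domain XML. The attribute
name comes from the cover letter; its placement on the interface's <driver>
sub-element and the value shown are assumptions pending review of PATCH 2/2:
<interface type='network'>
  <source network='default'/>
  <model type='virtio'/>
  <!-- hypothetical: tx_alg selects which of the two TX algorithms
       qemu uses for this virtio-net-pci device; see PATCH 2/2 for the
       actual names and semantics -->
  <driver tx_alg='timer'/>
</interface>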
[libvirt] [PATCH] maint: Replace tabs in apibuild.py with spaces
by Jiri Denemark
---
docs/apibuild.py | 3194 +++++++++++++++++++++++++++---------------------------
1 files changed, 1597 insertions(+), 1597 deletions(-)
diff --git a/docs/apibuild.py b/docs/apibuild.py
index 895a313..506932d 100755
--- a/docs/apibuild.py
+++ b/docs/apibuild.py
@@ -72,34 +72,34 @@ class identifier:
def __init__(self, name, header=None, module=None, type=None, lineno = 0,
info=None, extra=None, conditionals = None):
self.name = name
- self.header = header
- self.module = module
- self.type = type
- self.info = info
- self.extra = extra
- self.lineno = lineno
- self.static = 0
- if conditionals == None or len(conditionals) == 0:
- self.conditionals = None
- else:
- self.conditionals = conditionals[:]
- if self.name == debugsym:
- print "=> define %s : %s" % (debugsym, (module, type, info,
- extra, conditionals))
+ self.header = header
+ self.module = module
+ self.type = type
+ self.info = info
+ self.extra = extra
+ self.lineno = lineno
+ self.static = 0
+ if conditionals == None or len(conditionals) == 0:
+ self.conditionals = None
+ else:
+ self.conditionals = conditionals[:]
+ if self.name == debugsym:
+ print "=> define %s : %s" % (debugsym, (module, type, info,
+ extra, conditionals))
def __repr__(self):
r = "%s %s:" % (self.type, self.name)
- if self.static:
- r = r + " static"
- if self.module != None:
- r = r + " from %s" % (self.module)
- if self.info != None:
- r = r + " " + `self.info`
- if self.extra != None:
- r = r + " " + `self.extra`
- if self.conditionals != None:
- r = r + " " + `self.conditionals`
- return r
+ if self.static:
+ r = r + " static"
+ if self.module != None:
+ r = r + " from %s" % (self.module)
+ if self.info != None:
+ r = r + " " + `self.info`
+ if self.extra != None:
+ r = r + " " + `self.extra`
+ if self.conditionals != None:
+ r = r + " " + `self.conditionals`
+ return r
def set_header(self, header):
@@ -117,10 +117,10 @@ class identifier:
def set_static(self, static):
self.static = static
def set_conditionals(self, conditionals):
- if conditionals == None or len(conditionals) == 0:
- self.conditionals = None
- else:
- self.conditionals = conditionals[:]
+ if conditionals == None or len(conditionals) == 0:
+ self.conditionals = None
+ else:
+ self.conditionals = conditionals[:]
def get_name(self):
return self.name
@@ -143,96 +143,96 @@ class identifier:
def update(self, header, module, type = None, info = None, extra=None,
conditionals=None):
- if self.name == debugsym:
- print "=> update %s : %s" % (debugsym, (module, type, info,
- extra, conditionals))
+ if self.name == debugsym:
+ print "=> update %s : %s" % (debugsym, (module, type, info,
+ extra, conditionals))
if header != None and self.header == None:
- self.set_header(module)
+ self.set_header(module)
if module != None and (self.module == None or self.header == self.module):
- self.set_module(module)
+ self.set_module(module)
if type != None and self.type == None:
- self.set_type(type)
+ self.set_type(type)
if info != None:
- self.set_info(info)
+ self.set_info(info)
if extra != None:
- self.set_extra(extra)
+ self.set_extra(extra)
if conditionals != None:
- self.set_conditionals(conditionals)
+ self.set_conditionals(conditionals)
class index:
def __init__(self, name = "noname"):
self.name = name
self.identifiers = {}
self.functions = {}
- self.variables = {}
- self.includes = {}
- self.structs = {}
- self.enums = {}
- self.typedefs = {}
- self.macros = {}
- self.references = {}
- self.info = {}
+ self.variables = {}
+ self.includes = {}
+ self.structs = {}
+ self.enums = {}
+ self.typedefs = {}
+ self.macros = {}
+ self.references = {}
+ self.info = {}
def add_ref(self, name, header, module, static, type, lineno, info=None, extra=None, conditionals = None):
if name[0:2] == '__':
- return None
+ return None
d = None
try:
- d = self.identifiers[name]
- d.update(header, module, type, lineno, info, extra, conditionals)
- except:
- d = identifier(name, header, module, type, lineno, info, extra, conditionals)
- self.identifiers[name] = d
+ d = self.identifiers[name]
+ d.update(header, module, type, lineno, info, extra, conditionals)
+ except:
+ d = identifier(name, header, module, type, lineno, info, extra, conditionals)
+ self.identifiers[name] = d
- if d != None and static == 1:
- d.set_static(1)
+ if d != None and static == 1:
+ d.set_static(1)
- if d != None and name != None and type != None:
- self.references[name] = d
+ if d != None and name != None and type != None:
+ self.references[name] = d
- if name == debugsym:
- print "New ref: %s" % (d)
+ if name == debugsym:
+ print "New ref: %s" % (d)
- return d
+ return d
def add(self, name, header, module, static, type, lineno, info=None, extra=None, conditionals = None):
if name[0:2] == '__':
- return None
+ return None
d = None
try:
- d = self.identifiers[name]
- d.update(header, module, type, lineno, info, extra, conditionals)
- except:
- d = identifier(name, header, module, type, lineno, info, extra, conditionals)
- self.identifiers[name] = d
-
- if d != None and static == 1:
- d.set_static(1)
-
- if d != None and name != None and type != None:
- if type == "function":
- self.functions[name] = d
- elif type == "functype":
- self.functions[name] = d
- elif type == "variable":
- self.variables[name] = d
- elif type == "include":
- self.includes[name] = d
- elif type == "struct":
- self.structs[name] = d
- elif type == "enum":
- self.enums[name] = d
- elif type == "typedef":
- self.typedefs[name] = d
- elif type == "macro":
- self.macros[name] = d
- else:
- print "Unable to register type ", type
-
- if name == debugsym:
- print "New symbol: %s" % (d)
-
- return d
+ d = self.identifiers[name]
+ d.update(header, module, type, lineno, info, extra, conditionals)
+ except:
+ d = identifier(name, header, module, type, lineno, info, extra, conditionals)
+ self.identifiers[name] = d
+
+ if d != None and static == 1:
+ d.set_static(1)
+
+ if d != None and name != None and type != None:
+ if type == "function":
+ self.functions[name] = d
+ elif type == "functype":
+ self.functions[name] = d
+ elif type == "variable":
+ self.variables[name] = d
+ elif type == "include":
+ self.includes[name] = d
+ elif type == "struct":
+ self.structs[name] = d
+ elif type == "enum":
+ self.enums[name] = d
+ elif type == "typedef":
+ self.typedefs[name] = d
+ elif type == "macro":
+ self.macros[name] = d
+ else:
+ print "Unable to register type ", type
+
+ if name == debugsym:
+ print "New symbol: %s" % (d)
+
+ return d
def merge(self, idx):
for id in idx.functions.keys():
@@ -240,41 +240,41 @@ class index:
# macro might be used to override functions or variables
# definitions
#
- if self.macros.has_key(id):
- del self.macros[id]
- if self.functions.has_key(id):
- print "function %s from %s redeclared in %s" % (
- id, self.functions[id].header, idx.functions[id].header)
- else:
- self.functions[id] = idx.functions[id]
- self.identifiers[id] = idx.functions[id]
+ if self.macros.has_key(id):
+ del self.macros[id]
+ if self.functions.has_key(id):
+ print "function %s from %s redeclared in %s" % (
+ id, self.functions[id].header, idx.functions[id].header)
+ else:
+ self.functions[id] = idx.functions[id]
+ self.identifiers[id] = idx.functions[id]
for id in idx.variables.keys():
#
# macro might be used to override functions or variables
# definitions
#
- if self.macros.has_key(id):
- del self.macros[id]
- if self.variables.has_key(id):
- print "variable %s from %s redeclared in %s" % (
- id, self.variables[id].header, idx.variables[id].header)
- else:
- self.variables[id] = idx.variables[id]
- self.identifiers[id] = idx.variables[id]
+ if self.macros.has_key(id):
+ del self.macros[id]
+ if self.variables.has_key(id):
+ print "variable %s from %s redeclared in %s" % (
+ id, self.variables[id].header, idx.variables[id].header)
+ else:
+ self.variables[id] = idx.variables[id]
+ self.identifiers[id] = idx.variables[id]
for id in idx.structs.keys():
- if self.structs.has_key(id):
- print "struct %s from %s redeclared in %s" % (
- id, self.structs[id].header, idx.structs[id].header)
- else:
- self.structs[id] = idx.structs[id]
- self.identifiers[id] = idx.structs[id]
+ if self.structs.has_key(id):
+ print "struct %s from %s redeclared in %s" % (
+ id, self.structs[id].header, idx.structs[id].header)
+ else:
+ self.structs[id] = idx.structs[id]
+ self.identifiers[id] = idx.structs[id]
for id in idx.typedefs.keys():
- if self.typedefs.has_key(id):
- print "typedef %s from %s redeclared in %s" % (
- id, self.typedefs[id].header, idx.typedefs[id].header)
- else:
- self.typedefs[id] = idx.typedefs[id]
- self.identifiers[id] = idx.typedefs[id]
+ if self.typedefs.has_key(id):
+ print "typedef %s from %s redeclared in %s" % (
+ id, self.typedefs[id].header, idx.typedefs[id].header)
+ else:
+ self.typedefs[id] = idx.typedefs[id]
+ self.identifiers[id] = idx.typedefs[id]
for id in idx.macros.keys():
#
# macro might be used to override functions or variables
@@ -286,88 +286,88 @@ class index:
continue
if self.enums.has_key(id):
continue
- if self.macros.has_key(id):
- print "macro %s from %s redeclared in %s" % (
- id, self.macros[id].header, idx.macros[id].header)
- else:
- self.macros[id] = idx.macros[id]
- self.identifiers[id] = idx.macros[id]
+ if self.macros.has_key(id):
+ print "macro %s from %s redeclared in %s" % (
+ id, self.macros[id].header, idx.macros[id].header)
+ else:
+ self.macros[id] = idx.macros[id]
+ self.identifiers[id] = idx.macros[id]
for id in idx.enums.keys():
- if self.enums.has_key(id):
- print "enum %s from %s redeclared in %s" % (
- id, self.enums[id].header, idx.enums[id].header)
- else:
- self.enums[id] = idx.enums[id]
- self.identifiers[id] = idx.enums[id]
+ if self.enums.has_key(id):
+ print "enum %s from %s redeclared in %s" % (
+ id, self.enums[id].header, idx.enums[id].header)
+ else:
+ self.enums[id] = idx.enums[id]
+ self.identifiers[id] = idx.enums[id]
def merge_public(self, idx):
for id in idx.functions.keys():
- if self.functions.has_key(id):
- # check that function condition agrees with header
- if idx.functions[id].conditionals != \
- self.functions[id].conditionals:
- print "Header condition differs from Function for %s:" \
- % id
- print " H: %s" % self.functions[id].conditionals
- print " C: %s" % idx.functions[id].conditionals
- up = idx.functions[id]
- self.functions[id].update(None, up.module, up.type, up.info, up.extra)
- # else:
- # print "Function %s from %s is not declared in headers" % (
- # id, idx.functions[id].module)
- # TODO: do the same for variables.
+ if self.functions.has_key(id):
+ # check that function condition agrees with header
+ if idx.functions[id].conditionals != \
+ self.functions[id].conditionals:
+ print "Header condition differs from Function for %s:" \
+ % id
+ print " H: %s" % self.functions[id].conditionals
+ print " C: %s" % idx.functions[id].conditionals
+ up = idx.functions[id]
+ self.functions[id].update(None, up.module, up.type, up.info, up.extra)
+ # else:
+ # print "Function %s from %s is not declared in headers" % (
+ # id, idx.functions[id].module)
+ # TODO: do the same for variables.
def analyze_dict(self, type, dict):
count = 0
- public = 0
+ public = 0
for name in dict.keys():
- id = dict[name]
- count = count + 1
- if id.static == 0:
- public = public + 1
+ id = dict[name]
+ count = count + 1
+ if id.static == 0:
+ public = public + 1
if count != public:
- print " %d %s , %d public" % (count, type, public)
- elif count != 0:
- print " %d public %s" % (count, type)
+ print " %d %s , %d public" % (count, type, public)
+ elif count != 0:
+ print " %d public %s" % (count, type)
def analyze(self):
- self.analyze_dict("functions", self.functions)
- self.analyze_dict("variables", self.variables)
- self.analyze_dict("structs", self.structs)
- self.analyze_dict("typedefs", self.typedefs)
- self.analyze_dict("macros", self.macros)
+ self.analyze_dict("functions", self.functions)
+ self.analyze_dict("variables", self.variables)
+ self.analyze_dict("structs", self.structs)
+ self.analyze_dict("typedefs", self.typedefs)
+ self.analyze_dict("macros", self.macros)
class CLexer:
"""A lexer for the C language, tokenize the input by reading and
analyzing it line by line"""
def __init__(self, input):
self.input = input
- self.tokens = []
- self.line = ""
- self.lineno = 0
+ self.tokens = []
+ self.line = ""
+ self.lineno = 0
def getline(self):
line = ''
- while line == '':
- line = self.input.readline()
- if not line:
- return None
- self.lineno = self.lineno + 1
- line = string.lstrip(line)
- line = string.rstrip(line)
- if line == '':
- continue
- while line[-1] == '\\':
- line = line[:-1]
- n = self.input.readline()
- self.lineno = self.lineno + 1
- n = string.lstrip(n)
- n = string.rstrip(n)
- if not n:
- break
- else:
- line = line + n
+ while line == '':
+ line = self.input.readline()
+ if not line:
+ return None
+ self.lineno = self.lineno + 1
+ line = string.lstrip(line)
+ line = string.rstrip(line)
+ if line == '':
+ continue
+ while line[-1] == '\\':
+ line = line[:-1]
+ n = self.input.readline()
+ self.lineno = self.lineno + 1
+ n = string.lstrip(n)
+ n = string.rstrip(n)
+ if not n:
+ break
+ else:
+ line = line + n
return line
def getlineno(self):
@@ -378,193 +378,193 @@ class CLexer:
def debug(self):
print "Last token: ", self.last
- print "Token queue: ", self.tokens
- print "Line %d end: " % (self.lineno), self.line
+ print "Token queue: ", self.tokens
+ print "Line %d end: " % (self.lineno), self.line
def token(self):
while self.tokens == []:
- if self.line == "":
- line = self.getline()
- else:
- line = self.line
- self.line = ""
- if line == None:
- return None
-
- if line[0] == '#':
- self.tokens = map((lambda x: ('preproc', x)),
- string.split(line))
- break;
- l = len(line)
- if line[0] == '"' or line[0] == "'":
- end = line[0]
- line = line[1:]
- found = 0
- tok = ""
- while found == 0:
- i = 0
- l = len(line)
- while i < l:
- if line[i] == end:
- self.line = line[i+1:]
- line = line[:i]
- l = i
- found = 1
- break
- if line[i] == '\\':
- i = i + 1
- i = i + 1
- tok = tok + line
- if found == 0:
- line = self.getline()
- if line == None:
- return None
- self.last = ('string', tok)
- return self.last
-
- if l >= 2 and line[0] == '/' and line[1] == '*':
- line = line[2:]
- found = 0
- tok = ""
- while found == 0:
- i = 0
- l = len(line)
- while i < l:
- if line[i] == '*' and i+1 < l and line[i+1] == '/':
- self.line = line[i+2:]
- line = line[:i-1]
- l = i
- found = 1
- break
- i = i + 1
- if tok != "":
- tok = tok + "\n"
- tok = tok + line
- if found == 0:
- line = self.getline()
- if line == None:
- return None
- self.last = ('comment', tok)
- return self.last
- if l >= 2 and line[0] == '/' and line[1] == '/':
- line = line[2:]
- self.last = ('comment', line)
- return self.last
- i = 0
- while i < l:
- if line[i] == '/' and i+1 < l and line[i+1] == '/':
- self.line = line[i:]
- line = line[:i]
- break
- if line[i] == '/' and i+1 < l and line[i+1] == '*':
- self.line = line[i:]
- line = line[:i]
- break
- if line[i] == '"' or line[i] == "'":
- self.line = line[i:]
- line = line[:i]
- break
- i = i + 1
- l = len(line)
- i = 0
- while i < l:
- if line[i] == ' ' or line[i] == '\t':
- i = i + 1
- continue
- o = ord(line[i])
- if (o >= 97 and o <= 122) or (o >= 65 and o <= 90) or \
- (o >= 48 and o <= 57):
- s = i
- while i < l:
- o = ord(line[i])
- if (o >= 97 and o <= 122) or (o >= 65 and o <= 90) or \
- (o >= 48 and o <= 57) or string.find(
- " \t(){}:;,+-*/%&!|[]=><", line[i]) == -1:
- i = i + 1
- else:
- break
- self.tokens.append(('name', line[s:i]))
- continue
- if string.find("(){}:;,[]", line[i]) != -1:
+ if self.line == "":
+ line = self.getline()
+ else:
+ line = self.line
+ self.line = ""
+ if line == None:
+ return None
+
+ if line[0] == '#':
+ self.tokens = map((lambda x: ('preproc', x)),
+ string.split(line))
+ break;
+ l = len(line)
+ if line[0] == '"' or line[0] == "'":
+ end = line[0]
+ line = line[1:]
+ found = 0
+ tok = ""
+ while found == 0:
+ i = 0
+ l = len(line)
+ while i < l:
+ if line[i] == end:
+ self.line = line[i+1:]
+ line = line[:i]
+ l = i
+ found = 1
+ break
+ if line[i] == '\\':
+ i = i + 1
+ i = i + 1
+ tok = tok + line
+ if found == 0:
+ line = self.getline()
+ if line == None:
+ return None
+ self.last = ('string', tok)
+ return self.last
+
+ if l >= 2 and line[0] == '/' and line[1] == '*':
+ line = line[2:]
+ found = 0
+ tok = ""
+ while found == 0:
+ i = 0
+ l = len(line)
+ while i < l:
+ if line[i] == '*' and i+1 < l and line[i+1] == '/':
+ self.line = line[i+2:]
+ line = line[:i-1]
+ l = i
+ found = 1
+ break
+ i = i + 1
+ if tok != "":
+ tok = tok + "\n"
+ tok = tok + line
+ if found == 0:
+ line = self.getline()
+ if line == None:
+ return None
+ self.last = ('comment', tok)
+ return self.last
+ if l >= 2 and line[0] == '/' and line[1] == '/':
+ line = line[2:]
+ self.last = ('comment', line)
+ return self.last
+ i = 0
+ while i < l:
+ if line[i] == '/' and i+1 < l and line[i+1] == '/':
+ self.line = line[i:]
+ line = line[:i]
+ break
+ if line[i] == '/' and i+1 < l and line[i+1] == '*':
+ self.line = line[i:]
+ line = line[:i]
+ break
+ if line[i] == '"' or line[i] == "'":
+ self.line = line[i:]
+ line = line[:i]
+ break
+ i = i + 1
+ l = len(line)
+ i = 0
+ while i < l:
+ if line[i] == ' ' or line[i] == '\t':
+ i = i + 1
+ continue
+ o = ord(line[i])
+ if (o >= 97 and o <= 122) or (o >= 65 and o <= 90) or \
+ (o >= 48 and o <= 57):
+ s = i
+ while i < l:
+ o = ord(line[i])
+ if (o >= 97 and o <= 122) or (o >= 65 and o <= 90) or \
+ (o >= 48 and o <= 57) or string.find(
+ " \t(){}:;,+-*/%&!|[]=><", line[i]) == -1:
+ i = i + 1
+ else:
+ break
+ self.tokens.append(('name', line[s:i]))
+ continue
+ if string.find("(){}:;,[]", line[i]) != -1:
# if line[i] == '(' or line[i] == ')' or line[i] == '{' or \
-# line[i] == '}' or line[i] == ':' or line[i] == ';' or \
-# line[i] == ',' or line[i] == '[' or line[i] == ']':
- self.tokens.append(('sep', line[i]))
- i = i + 1
- continue
- if string.find("+-*><=/%&!|.", line[i]) != -1:
+# line[i] == '}' or line[i] == ':' or line[i] == ';' or \
+# line[i] == ',' or line[i] == '[' or line[i] == ']':
+ self.tokens.append(('sep', line[i]))
+ i = i + 1
+ continue
+ if string.find("+-*><=/%&!|.", line[i]) != -1:
# if line[i] == '+' or line[i] == '-' or line[i] == '*' or \
-# line[i] == '>' or line[i] == '<' or line[i] == '=' or \
-# line[i] == '/' or line[i] == '%' or line[i] == '&' or \
-# line[i] == '!' or line[i] == '|' or line[i] == '.':
- if line[i] == '.' and i + 2 < l and \
- line[i+1] == '.' and line[i+2] == '.':
- self.tokens.append(('name', '...'))
- i = i + 3
- continue
-
- j = i + 1
- if j < l and (
- string.find("+-*><=/%&!|", line[j]) != -1):
-# line[j] == '+' or line[j] == '-' or line[j] == '*' or \
-# line[j] == '>' or line[j] == '<' or line[j] == '=' or \
-# line[j] == '/' or line[j] == '%' or line[j] == '&' or \
-# line[j] == '!' or line[j] == '|'):
- self.tokens.append(('op', line[i:j+1]))
- i = j + 1
- else:
- self.tokens.append(('op', line[i]))
- i = i + 1
- continue
- s = i
- while i < l:
- o = ord(line[i])
- if (o >= 97 and o <= 122) or (o >= 65 and o <= 90) or \
- (o >= 48 and o <= 57) or (
- string.find(" \t(){}:;,+-*/%&!|[]=><", line[i]) == -1):
-# line[i] != ' ' and line[i] != '\t' and
-# line[i] != '(' and line[i] != ')' and
-# line[i] != '{' and line[i] != '}' and
-# line[i] != ':' and line[i] != ';' and
-# line[i] != ',' and line[i] != '+' and
-# line[i] != '-' and line[i] != '*' and
-# line[i] != '/' and line[i] != '%' and
-# line[i] != '&' and line[i] != '!' and
-# line[i] != '|' and line[i] != '[' and
-# line[i] != ']' and line[i] != '=' and
-# line[i] != '*' and line[i] != '>' and
-# line[i] != '<'):
- i = i + 1
- else:
- break
- self.tokens.append(('name', line[s:i]))
-
- tok = self.tokens[0]
- self.tokens = self.tokens[1:]
- self.last = tok
- return tok
+# line[i] == '>' or line[i] == '<' or line[i] == '=' or \
+# line[i] == '/' or line[i] == '%' or line[i] == '&' or \
+# line[i] == '!' or line[i] == '|' or line[i] == '.':
+ if line[i] == '.' and i + 2 < l and \
+ line[i+1] == '.' and line[i+2] == '.':
+ self.tokens.append(('name', '...'))
+ i = i + 3
+ continue
+
+ j = i + 1
+ if j < l and (
+ string.find("+-*><=/%&!|", line[j]) != -1):
+# line[j] == '+' or line[j] == '-' or line[j] == '*' or \
+# line[j] == '>' or line[j] == '<' or line[j] == '=' or \
+# line[j] == '/' or line[j] == '%' or line[j] == '&' or \
+# line[j] == '!' or line[j] == '|'):
+ self.tokens.append(('op', line[i:j+1]))
+ i = j + 1
+ else:
+ self.tokens.append(('op', line[i]))
+ i = i + 1
+ continue
+ s = i
+ while i < l:
+ o = ord(line[i])
+ if (o >= 97 and o <= 122) or (o >= 65 and o <= 90) or \
+ (o >= 48 and o <= 57) or (
+ string.find(" \t(){}:;,+-*/%&!|[]=><", line[i]) == -1):
+# line[i] != ' ' and line[i] != '\t' and
+# line[i] != '(' and line[i] != ')' and
+# line[i] != '{' and line[i] != '}' and
+# line[i] != ':' and line[i] != ';' and
+# line[i] != ',' and line[i] != '+' and
+# line[i] != '-' and line[i] != '*' and
+# line[i] != '/' and line[i] != '%' and
+# line[i] != '&' and line[i] != '!' and
+# line[i] != '|' and line[i] != '[' and
+# line[i] != ']' and line[i] != '=' and
+# line[i] != '*' and line[i] != '>' and
+# line[i] != '<'):
+ i = i + 1
+ else:
+ break
+ self.tokens.append(('name', line[s:i]))
+
+ tok = self.tokens[0]
+ self.tokens = self.tokens[1:]
+ self.last = tok
+ return tok
class CParser:
"""The C module parser"""
def __init__(self, filename, idx = None):
self.filename = filename
- if len(filename) > 2 and filename[-2:] == '.h':
- self.is_header = 1
- else:
- self.is_header = 0
+ if len(filename) > 2 and filename[-2:] == '.h':
+ self.is_header = 1
+ else:
+ self.is_header = 0
self.input = open(filename)
- self.lexer = CLexer(self.input)
- if idx == None:
- self.index = index()
- else:
- self.index = idx
- self.top_comment = ""
- self.last_comment = ""
- self.comment = None
- self.collect_ref = 0
- self.no_error = 0
- self.conditionals = []
- self.defines = []
+ self.lexer = CLexer(self.input)
+ if idx == None:
+ self.index = index()
+ else:
+ self.index = idx
+ self.top_comment = ""
+ self.last_comment = ""
+ self.comment = None
+ self.collect_ref = 0
+ self.no_error = 0
+ self.conditionals = []
+ self.defines = []
def collect_references(self):
self.collect_ref = 1
@@ -579,203 +579,203 @@ class CParser:
return self.lexer.getlineno()
def index_add(self, name, module, static, type, info=None, extra = None):
- if self.is_header == 1:
- self.index.add(name, module, module, static, type, self.lineno(),
- info, extra, self.conditionals)
- else:
- self.index.add(name, None, module, static, type, self.lineno(),
- info, extra, self.conditionals)
+ if self.is_header == 1:
+ self.index.add(name, module, module, static, type, self.lineno(),
+ info, extra, self.conditionals)
+ else:
+ self.index.add(name, None, module, static, type, self.lineno(),
+ info, extra, self.conditionals)
def index_add_ref(self, name, module, static, type, info=None,
extra = None):
- if self.is_header == 1:
- self.index.add_ref(name, module, module, static, type,
- self.lineno(), info, extra, self.conditionals)
- else:
- self.index.add_ref(name, None, module, static, type, self.lineno(),
- info, extra, self.conditionals)
+ if self.is_header == 1:
+ self.index.add_ref(name, module, module, static, type,
+ self.lineno(), info, extra, self.conditionals)
+ else:
+ self.index.add_ref(name, None, module, static, type, self.lineno(),
+ info, extra, self.conditionals)
def warning(self, msg):
if self.no_error:
- return
- print msg
+ return
+ print msg
def error(self, msg, token=-1):
if self.no_error:
- return
+ return
print "Parse Error: " + msg
- if token != -1:
- print "Got token ", token
- self.lexer.debug()
- sys.exit(1)
+ if token != -1:
+ print "Got token ", token
+ self.lexer.debug()
+ sys.exit(1)
def debug(self, msg, token=-1):
print "Debug: " + msg
- if token != -1:
- print "Got token ", token
- self.lexer.debug()
+ if token != -1:
+ print "Got token ", token
+ self.lexer.debug()
def parseTopComment(self, comment):
- res = {}
- lines = string.split(comment, "\n")
- item = None
- for line in lines:
- while line != "" and (line[0] == ' ' or line[0] == '\t'):
- line = line[1:]
- while line != "" and line[0] == '*':
- line = line[1:]
- while line != "" and (line[0] == ' ' or line[0] == '\t'):
- line = line[1:]
- try:
- (it, line) = string.split(line, ":", 1)
- item = it
- while line != "" and (line[0] == ' ' or line[0] == '\t'):
- line = line[1:]
- if res.has_key(item):
- res[item] = res[item] + " " + line
- else:
- res[item] = line
- except:
- if item != None:
- if res.has_key(item):
- res[item] = res[item] + " " + line
- else:
- res[item] = line
- self.index.info = res
+ res = {}
+ lines = string.split(comment, "\n")
+ item = None
+ for line in lines:
+ while line != "" and (line[0] == ' ' or line[0] == '\t'):
+ line = line[1:]
+ while line != "" and line[0] == '*':
+ line = line[1:]
+ while line != "" and (line[0] == ' ' or line[0] == '\t'):
+ line = line[1:]
+ try:
+ (it, line) = string.split(line, ":", 1)
+ item = it
+ while line != "" and (line[0] == ' ' or line[0] == '\t'):
+ line = line[1:]
+ if res.has_key(item):
+ res[item] = res[item] + " " + line
+ else:
+ res[item] = line
+ except:
+ if item != None:
+ if res.has_key(item):
+ res[item] = res[item] + " " + line
+ else:
+ res[item] = line
+ self.index.info = res
def parseComment(self, token):
if self.top_comment == "":
- self.top_comment = token[1]
- if self.comment == None or token[1][0] == '*':
- self.comment = token[1];
- else:
- self.comment = self.comment + token[1]
- token = self.lexer.token()
+ self.top_comment = token[1]
+ if self.comment == None or token[1][0] == '*':
+ self.comment = token[1];
+ else:
+ self.comment = self.comment + token[1]
+ token = self.lexer.token()
if string.find(self.comment, "DOC_DISABLE") != -1:
- self.stop_error()
+ self.stop_error()
if string.find(self.comment, "DOC_ENABLE") != -1:
- self.start_error()
+ self.start_error()
- return token
+ return token
#
# Parse a comment block associate to a typedef
#
def parseTypeComment(self, name, quiet = 0):
if name[0:2] == '__':
- quiet = 1
+ quiet = 1
args = []
- desc = ""
+ desc = ""
if self.comment == None:
- if not quiet:
- self.warning("Missing comment for type %s" % (name))
- return((args, desc))
+ if not quiet:
+ self.warning("Missing comment for type %s" % (name))
+ return((args, desc))
if self.comment[0] != '*':
- if not quiet:
- self.warning("Missing * in type comment for %s" % (name))
- return((args, desc))
- lines = string.split(self.comment, '\n')
- if lines[0] == '*':
- del lines[0]
- if lines[0] != "* %s:" % (name):
- if not quiet:
- self.warning("Misformatted type comment for %s" % (name))
- self.warning(" Expecting '* %s:' got '%s'" % (name, lines[0]))
- return((args, desc))
- del lines[0]
- while len(lines) > 0 and lines[0] == '*':
- del lines[0]
- desc = ""
- while len(lines) > 0:
- l = lines[0]
- while len(l) > 0 and l[0] == '*':
- l = l[1:]
- l = string.strip(l)
- desc = desc + " " + l
- del lines[0]
-
- desc = string.strip(desc)
-
- if quiet == 0:
- if desc == "":
- self.warning("Type comment for %s lack description of the macro" % (name))
-
- return(desc)
+ if not quiet:
+ self.warning("Missing * in type comment for %s" % (name))
+ return((args, desc))
+ lines = string.split(self.comment, '\n')
+ if lines[0] == '*':
+ del lines[0]
+ if lines[0] != "* %s:" % (name):
+ if not quiet:
+ self.warning("Misformatted type comment for %s" % (name))
+ self.warning(" Expecting '* %s:' got '%s'" % (name, lines[0]))
+ return((args, desc))
+ del lines[0]
+ while len(lines) > 0 and lines[0] == '*':
+ del lines[0]
+ desc = ""
+ while len(lines) > 0:
+ l = lines[0]
+ while len(l) > 0 and l[0] == '*':
+ l = l[1:]
+ l = string.strip(l)
+ desc = desc + " " + l
+ del lines[0]
+
+ desc = string.strip(desc)
+
+ if quiet == 0:
+ if desc == "":
+ self.warning("Type comment for %s lack description of the macro" % (name))
+
+ return(desc)
#
# Parse a comment block associate to a macro
#
def parseMacroComment(self, name, quiet = 0):
if name[0:2] == '__':
- quiet = 1
+ quiet = 1
args = []
- desc = ""
+ desc = ""
if self.comment == None:
- if not quiet:
- self.warning("Missing comment for macro %s" % (name))
- return((args, desc))
+ if not quiet:
+ self.warning("Missing comment for macro %s" % (name))
+ return((args, desc))
if self.comment[0] != '*':
- if not quiet:
- self.warning("Missing * in macro comment for %s" % (name))
- return((args, desc))
- lines = string.split(self.comment, '\n')
- if lines[0] == '*':
- del lines[0]
- if lines[0] != "* %s:" % (name):
- if not quiet:
- self.warning("Misformatted macro comment for %s" % (name))
- self.warning(" Expecting '* %s:' got '%s'" % (name, lines[0]))
- return((args, desc))
- del lines[0]
- while lines[0] == '*':
- del lines[0]
- while len(lines) > 0 and lines[0][0:3] == '* @':
- l = lines[0][3:]
- try:
- (arg, desc) = string.split(l, ':', 1)
- desc=string.strip(desc)
- arg=string.strip(arg)
+ if not quiet:
+ self.warning("Missing * in macro comment for %s" % (name))
+ return((args, desc))
+ lines = string.split(self.comment, '\n')
+ if lines[0] == '*':
+ del lines[0]
+ if lines[0] != "* %s:" % (name):
+ if not quiet:
+ self.warning("Misformatted macro comment for %s" % (name))
+ self.warning(" Expecting '* %s:' got '%s'" % (name, lines[0]))
+ return((args, desc))
+ del lines[0]
+ while lines[0] == '*':
+ del lines[0]
+ while len(lines) > 0 and lines[0][0:3] == '* @':
+ l = lines[0][3:]
+ try:
+ (arg, desc) = string.split(l, ':', 1)
+ desc=string.strip(desc)
+ arg=string.strip(arg)
except:
- if not quiet:
- self.warning("Misformatted macro comment for %s" % (name))
- self.warning(" problem with '%s'" % (lines[0]))
- del lines[0]
- continue
- del lines[0]
- l = string.strip(lines[0])
- while len(l) > 2 and l[0:3] != '* @':
- while l[0] == '*':
- l = l[1:]
- desc = desc + ' ' + string.strip(l)
- del lines[0]
- if len(lines) == 0:
- break
- l = lines[0]
+ if not quiet:
+ self.warning("Misformatted macro comment for %s" % (name))
+ self.warning(" problem with '%s'" % (lines[0]))
+ del lines[0]
+ continue
+ del lines[0]
+ l = string.strip(lines[0])
+ while len(l) > 2 and l[0:3] != '* @':
+ while l[0] == '*':
+ l = l[1:]
+ desc = desc + ' ' + string.strip(l)
+ del lines[0]
+ if len(lines) == 0:
+ break
+ l = lines[0]
args.append((arg, desc))
- while len(lines) > 0 and lines[0] == '*':
- del lines[0]
- desc = ""
- while len(lines) > 0:
- l = lines[0]
- while len(l) > 0 and l[0] == '*':
- l = l[1:]
- l = string.strip(l)
- desc = desc + " " + l
- del lines[0]
+ while len(lines) > 0 and lines[0] == '*':
+ del lines[0]
+ desc = ""
+ while len(lines) > 0:
+ l = lines[0]
+ while len(l) > 0 and l[0] == '*':
+ l = l[1:]
+ l = string.strip(l)
+ desc = desc + " " + l
+ del lines[0]
- desc = string.strip(desc)
+ desc = string.strip(desc)
- if quiet == 0:
- if desc == "":
- self.warning("Macro comment for %s lack description of the macro" % (name))
+ if quiet == 0:
+ if desc == "":
+ self.warning("Macro comment for %s lack description of the macro" % (name))
- return((args, desc))
+ return((args, desc))
#
# Parse a comment block and merge the information found in the
@@ -786,219 +786,219 @@ class CParser:
global ignored_functions
if name == 'main':
- quiet = 1
+ quiet = 1
if name[0:2] == '__':
- quiet = 1
+ quiet = 1
if ignored_functions.has_key(name):
quiet = 1
- (ret, args) = description
- desc = ""
- retdesc = ""
+ (ret, args) = description
+ desc = ""
+ retdesc = ""
if self.comment == None:
- if not quiet:
- self.warning("Missing comment for function %s" % (name))
- return(((ret[0], retdesc), args, desc))
+ if not quiet:
+ self.warning("Missing comment for function %s" % (name))
+ return(((ret[0], retdesc), args, desc))
if self.comment[0] != '*':
- if not quiet:
- self.warning("Missing * in function comment for %s" % (name))
- return(((ret[0], retdesc), args, desc))
- lines = string.split(self.comment, '\n')
- if lines[0] == '*':
- del lines[0]
- if lines[0] != "* %s:" % (name):
- if not quiet:
- self.warning("Misformatted function comment for %s" % (name))
- self.warning(" Expecting '* %s:' got '%s'" % (name, lines[0]))
- return(((ret[0], retdesc), args, desc))
- del lines[0]
- while lines[0] == '*':
- del lines[0]
- nbargs = len(args)
- while len(lines) > 0 and lines[0][0:3] == '* @':
- l = lines[0][3:]
- try:
- (arg, desc) = string.split(l, ':', 1)
- desc=string.strip(desc)
- arg=string.strip(arg)
+ if not quiet:
+ self.warning("Missing * in function comment for %s" % (name))
+ return(((ret[0], retdesc), args, desc))
+ lines = string.split(self.comment, '\n')
+ if lines[0] == '*':
+ del lines[0]
+ if lines[0] != "* %s:" % (name):
+ if not quiet:
+ self.warning("Misformatted function comment for %s" % (name))
+ self.warning(" Expecting '* %s:' got '%s'" % (name, lines[0]))
+ return(((ret[0], retdesc), args, desc))
+ del lines[0]
+ while lines[0] == '*':
+ del lines[0]
+ nbargs = len(args)
+ while len(lines) > 0 and lines[0][0:3] == '* @':
+ l = lines[0][3:]
+ try:
+ (arg, desc) = string.split(l, ':', 1)
+ desc=string.strip(desc)
+ arg=string.strip(arg)
except:
- if not quiet:
- self.warning("Misformatted function comment for %s" % (name))
- self.warning(" problem with '%s'" % (lines[0]))
- del lines[0]
- continue
- del lines[0]
- l = string.strip(lines[0])
- while len(l) > 2 and l[0:3] != '* @':
- while l[0] == '*':
- l = l[1:]
- desc = desc + ' ' + string.strip(l)
- del lines[0]
- if len(lines) == 0:
- break
- l = lines[0]
- i = 0
- while i < nbargs:
- if args[i][1] == arg:
- args[i] = (args[i][0], arg, desc)
- break;
- i = i + 1
- if i >= nbargs:
- if not quiet:
- self.warning("Unable to find arg %s from function comment for %s" % (
- arg, name))
- while len(lines) > 0 and lines[0] == '*':
- del lines[0]
- desc = None
- while len(lines) > 0:
- l = lines[0]
- i = 0
- # Remove all leading '*', followed by at most one ' ' character
- # since we need to preserve correct identation of code examples
- while i < len(l) and l[i] == '*':
- i = i + 1
- if i > 0:
- if i < len(l) and l[i] == ' ':
- i = i + 1
- l = l[i:]
- if len(l) >= 6 and l[0:7] == "returns" or l[0:7] == "Returns":
- try:
- l = string.split(l, ' ', 1)[1]
- except:
- l = ""
- retdesc = string.strip(l)
- del lines[0]
- while len(lines) > 0:
- l = lines[0]
- while len(l) > 0 and l[0] == '*':
- l = l[1:]
- l = string.strip(l)
- retdesc = retdesc + " " + l
- del lines[0]
- else:
- if desc is not None:
- desc = desc + "\n" + l
- else:
- desc = l
- del lines[0]
-
- if desc is None:
- desc = ""
- retdesc = string.strip(retdesc)
- desc = string.strip(desc)
-
- if quiet == 0:
- #
- # report missing comments
- #
- i = 0
- while i < nbargs:
- if args[i][2] == None and args[i][0] != "void" and args[i][1] != None:
- self.warning("Function comment for %s lacks description of arg %s" % (name, args[i][1]))
- i = i + 1
- if retdesc == "" and ret[0] != "void":
- self.warning("Function comment for %s lacks description of return value" % (name))
- if desc == "":
- self.warning("Function comment for %s lacks description of the function" % (name))
-
-
- return(((ret[0], retdesc), args, desc))
+ if not quiet:
+ self.warning("Misformatted function comment for %s" % (name))
+ self.warning(" problem with '%s'" % (lines[0]))
+ del lines[0]
+ continue
+ del lines[0]
+ l = string.strip(lines[0])
+ while len(l) > 2 and l[0:3] != '* @':
+ while l[0] == '*':
+ l = l[1:]
+ desc = desc + ' ' + string.strip(l)
+ del lines[0]
+ if len(lines) == 0:
+ break
+ l = lines[0]
+ i = 0
+ while i < nbargs:
+ if args[i][1] == arg:
+ args[i] = (args[i][0], arg, desc)
+ break;
+ i = i + 1
+ if i >= nbargs:
+ if not quiet:
+ self.warning("Unable to find arg %s from function comment for %s" % (
+ arg, name))
+ while len(lines) > 0 and lines[0] == '*':
+ del lines[0]
+ desc = None
+ while len(lines) > 0:
+ l = lines[0]
+ i = 0
+ # Remove all leading '*', followed by at most one ' ' character
+ # since we need to preserve correct identation of code examples
+ while i < len(l) and l[i] == '*':
+ i = i + 1
+ if i > 0:
+ if i < len(l) and l[i] == ' ':
+ i = i + 1
+ l = l[i:]
+ if len(l) >= 6 and l[0:7] == "returns" or l[0:7] == "Returns":
+ try:
+ l = string.split(l, ' ', 1)[1]
+ except:
+ l = ""
+ retdesc = string.strip(l)
+ del lines[0]
+ while len(lines) > 0:
+ l = lines[0]
+ while len(l) > 0 and l[0] == '*':
+ l = l[1:]
+ l = string.strip(l)
+ retdesc = retdesc + " " + l
+ del lines[0]
+ else:
+ if desc is not None:
+ desc = desc + "\n" + l
+ else:
+ desc = l
+ del lines[0]
+
+ if desc is None:
+ desc = ""
+ retdesc = string.strip(retdesc)
+ desc = string.strip(desc)
+
+ if quiet == 0:
+ #
+ # report missing comments
+ #
+ i = 0
+ while i < nbargs:
+ if args[i][2] == None and args[i][0] != "void" and args[i][1] != None:
+ self.warning("Function comment for %s lacks description of arg %s" % (name, args[i][1]))
+ i = i + 1
+ if retdesc == "" and ret[0] != "void":
+ self.warning("Function comment for %s lacks description of return value" % (name))
+ if desc == "":
+ self.warning("Function comment for %s lacks description of the function" % (name))
+
+
+ return(((ret[0], retdesc), args, desc))
def parsePreproc(self, token):
- if debug:
- print "=> preproc ", token, self.lexer.tokens
+ if debug:
+ print "=> preproc ", token, self.lexer.tokens
name = token[1]
- if name == "#include":
- token = self.lexer.token()
- if token == None:
- return None
- if token[0] == 'preproc':
- self.index_add(token[1], self.filename, not self.is_header,
- "include")
- return self.lexer.token()
- return token
- if name == "#define":
- token = self.lexer.token()
- if token == None:
- return None
- if token[0] == 'preproc':
- # TODO macros with arguments
- name = token[1]
- lst = []
- token = self.lexer.token()
- while token != None and token[0] == 'preproc' and \
- token[1][0] != '#':
- lst.append(token[1])
- token = self.lexer.token()
+ if name == "#include":
+ token = self.lexer.token()
+ if token == None:
+ return None
+ if token[0] == 'preproc':
+ self.index_add(token[1], self.filename, not self.is_header,
+ "include")
+ return self.lexer.token()
+ return token
+ if name == "#define":
+ token = self.lexer.token()
+ if token == None:
+ return None
+ if token[0] == 'preproc':
+ # TODO macros with arguments
+ name = token[1]
+ lst = []
+ token = self.lexer.token()
+ while token != None and token[0] == 'preproc' and \
+ token[1][0] != '#':
+ lst.append(token[1])
+ token = self.lexer.token()
try:
- name = string.split(name, '(') [0]
+ name = string.split(name, '(') [0]
except:
pass
info = self.parseMacroComment(name, not self.is_header)
- self.index_add(name, self.filename, not self.is_header,
- "macro", info)
- return token
-
- #
- # Processing of conditionals modified by Bill 1/1/05
- #
- # We process conditionals (i.e. tokens from #ifdef, #ifndef,
- # #if, #else and #endif) for headers and mainline code,
- # store the ones from the header in libxml2-api.xml, and later
- # (in the routine merge_public) verify that the two (header and
- # mainline code) agree.
- #
- # There is a small problem with processing the headers. Some of
- # the variables are not concerned with enabling / disabling of
- # library functions (e.g. '__XML_PARSER_H__'), and we don't want
- # them to be included in libxml2-api.xml, or involved in
- # the check between the header and the mainline code. To
- # accomplish this, we ignore any conditional which doesn't include
- # the string 'ENABLED'
- #
- if name == "#ifdef":
- apstr = self.lexer.tokens[0][1]
- try:
- self.defines.append(apstr)
- if string.find(apstr, 'ENABLED') != -1:
- self.conditionals.append("defined(%s)" % apstr)
- except:
- pass
- elif name == "#ifndef":
- apstr = self.lexer.tokens[0][1]
- try:
- self.defines.append(apstr)
- if string.find(apstr, 'ENABLED') != -1:
- self.conditionals.append("!defined(%s)" % apstr)
- except:
- pass
- elif name == "#if":
- apstr = ""
- for tok in self.lexer.tokens:
- if apstr != "":
- apstr = apstr + " "
- apstr = apstr + tok[1]
- try:
- self.defines.append(apstr)
- if string.find(apstr, 'ENABLED') != -1:
- self.conditionals.append(apstr)
- except:
- pass
- elif name == "#else":
- if self.conditionals != [] and \
- string.find(self.defines[-1], 'ENABLED') != -1:
- self.conditionals[-1] = "!(%s)" % self.conditionals[-1]
- elif name == "#endif":
- if self.conditionals != [] and \
- string.find(self.defines[-1], 'ENABLED') != -1:
- self.conditionals = self.conditionals[:-1]
- self.defines = self.defines[:-1]
- token = self.lexer.token()
- while token != None and token[0] == 'preproc' and \
- token[1][0] != '#':
- token = self.lexer.token()
- return token
+ self.index_add(name, self.filename, not self.is_header,
+ "macro", info)
+ return token
+
+ #
+ # Processing of conditionals modified by Bill 1/1/05
+ #
+ # We process conditionals (i.e. tokens from #ifdef, #ifndef,
+ # #if, #else and #endif) for headers and mainline code,
+ # store the ones from the header in libxml2-api.xml, and later
+ # (in the routine merge_public) verify that the two (header and
+ # mainline code) agree.
+ #
+ # There is a small problem with processing the headers. Some of
+ # the variables are not concerned with enabling / disabling of
+ # library functions (e.g. '__XML_PARSER_H__'), and we don't want
+ # them to be included in libxml2-api.xml, or involved in
+ # the check between the header and the mainline code. To
+ # accomplish this, we ignore any conditional which doesn't include
+ # the string 'ENABLED'
+ #
+ if name == "#ifdef":
+ apstr = self.lexer.tokens[0][1]
+ try:
+ self.defines.append(apstr)
+ if string.find(apstr, 'ENABLED') != -1:
+ self.conditionals.append("defined(%s)" % apstr)
+ except:
+ pass
+ elif name == "#ifndef":
+ apstr = self.lexer.tokens[0][1]
+ try:
+ self.defines.append(apstr)
+ if string.find(apstr, 'ENABLED') != -1:
+ self.conditionals.append("!defined(%s)" % apstr)
+ except:
+ pass
+ elif name == "#if":
+ apstr = ""
+ for tok in self.lexer.tokens:
+ if apstr != "":
+ apstr = apstr + " "
+ apstr = apstr + tok[1]
+ try:
+ self.defines.append(apstr)
+ if string.find(apstr, 'ENABLED') != -1:
+ self.conditionals.append(apstr)
+ except:
+ pass
+ elif name == "#else":
+ if self.conditionals != [] and \
+ string.find(self.defines[-1], 'ENABLED') != -1:
+ self.conditionals[-1] = "!(%s)" % self.conditionals[-1]
+ elif name == "#endif":
+ if self.conditionals != [] and \
+ string.find(self.defines[-1], 'ENABLED') != -1:
+ self.conditionals = self.conditionals[:-1]
+ self.defines = self.defines[:-1]
+ token = self.lexer.token()
+ while token != None and token[0] == 'preproc' and \
+ token[1][0] != '#':
+ token = self.lexer.token()
+ return token
#
# token acquisition on top of the lexer, it handle internally
@@ -1012,89 +1012,89 @@ class CParser:
global ignored_words
token = self.lexer.token()
- while token != None:
- if token[0] == 'comment':
- token = self.parseComment(token)
- continue
- elif token[0] == 'preproc':
- token = self.parsePreproc(token)
- continue
- elif token[0] == "name" and token[1] == "__const":
- token = ("name", "const")
- return token
- elif token[0] == "name" and token[1] == "__attribute":
- token = self.lexer.token()
- while token != None and token[1] != ";":
- token = self.lexer.token()
- return token
- elif token[0] == "name" and ignored_words.has_key(token[1]):
- (n, info) = ignored_words[token[1]]
- i = 0
- while i < n:
- token = self.lexer.token()
- i = i + 1
- token = self.lexer.token()
- continue
- else:
- if debug:
- print "=> ", token
- return token
- return None
+ while token != None:
+ if token[0] == 'comment':
+ token = self.parseComment(token)
+ continue
+ elif token[0] == 'preproc':
+ token = self.parsePreproc(token)
+ continue
+ elif token[0] == "name" and token[1] == "__const":
+ token = ("name", "const")
+ return token
+ elif token[0] == "name" and token[1] == "__attribute":
+ token = self.lexer.token()
+ while token != None and token[1] != ";":
+ token = self.lexer.token()
+ return token
+ elif token[0] == "name" and ignored_words.has_key(token[1]):
+ (n, info) = ignored_words[token[1]]
+ i = 0
+ while i < n:
+ token = self.lexer.token()
+ i = i + 1
+ token = self.lexer.token()
+ continue
+ else:
+ if debug:
+ print "=> ", token
+ return token
+ return None
#
# Parse a typedef, it records the type and its name.
#
def parseTypedef(self, token):
if token == None:
- return None
- token = self.parseType(token)
- if token == None:
- self.error("parsing typedef")
- return None
- base_type = self.type
- type = base_type
- #self.debug("end typedef type", token)
- while token != None:
- if token[0] == "name":
- name = token[1]
- signature = self.signature
- if signature != None:
- type = string.split(type, '(')[0]
- d = self.mergeFunctionComment(name,
- ((type, None), signature), 1)
- self.index_add(name, self.filename, not self.is_header,
- "functype", d)
- else:
- if base_type == "struct":
- self.index_add(name, self.filename, not self.is_header,
- "struct", type)
- base_type = "struct " + name
- else:
- # TODO report missing or misformatted comments
- info = self.parseTypeComment(name, 1)
- self.index_add(name, self.filename, not self.is_header,
- "typedef", type, info)
- token = self.token()
- else:
- self.error("parsing typedef: expecting a name")
- return token
- #self.debug("end typedef", token)
- if token != None and token[0] == 'sep' and token[1] == ',':
- type = base_type
- token = self.token()
- while token != None and token[0] == "op":
- type = type + token[1]
- token = self.token()
- elif token != None and token[0] == 'sep' and token[1] == ';':
- break;
- elif token != None and token[0] == 'name':
- type = base_type
- continue;
- else:
- self.error("parsing typedef: expecting ';'", token)
- return token
- token = self.token()
- return token
+ return None
+ token = self.parseType(token)
+ if token == None:
+ self.error("parsing typedef")
+ return None
+ base_type = self.type
+ type = base_type
+ #self.debug("end typedef type", token)
+ while token != None:
+ if token[0] == "name":
+ name = token[1]
+ signature = self.signature
+ if signature != None:
+ type = string.split(type, '(')[0]
+ d = self.mergeFunctionComment(name,
+ ((type, None), signature), 1)
+ self.index_add(name, self.filename, not self.is_header,
+ "functype", d)
+ else:
+ if base_type == "struct":
+ self.index_add(name, self.filename, not self.is_header,
+ "struct", type)
+ base_type = "struct " + name
+ else:
+ # TODO report missing or misformatted comments
+ info = self.parseTypeComment(name, 1)
+ self.index_add(name, self.filename, not self.is_header,
+ "typedef", type, info)
+ token = self.token()
+ else:
+ self.error("parsing typedef: expecting a name")
+ return token
+ #self.debug("end typedef", token)
+ if token != None and token[0] == 'sep' and token[1] == ',':
+ type = base_type
+ token = self.token()
+ while token != None and token[0] == "op":
+ type = type + token[1]
+ token = self.token()
+ elif token != None and token[0] == 'sep' and token[1] == ';':
+ break;
+ elif token != None and token[0] == 'name':
+ type = base_type
+ continue;
+ else:
+ self.error("parsing typedef: expecting ';'", token)
+ return token
+ token = self.token()
+ return token
#
# Parse a C code block, used for functions it parse till
@@ -1102,138 +1102,138 @@ class CParser:
#
def parseBlock(self, token):
while token != None:
- if token[0] == "sep" and token[1] == "{":
- token = self.token()
- token = self.parseBlock(token)
- elif token[0] == "sep" and token[1] == "}":
- self.comment = None
- token = self.token()
- return token
- else:
- if self.collect_ref == 1:
- oldtok = token
- token = self.token()
- if oldtok[0] == "name" and oldtok[1][0:3] == "vir":
- if token[0] == "sep" and token[1] == "(":
- self.index_add_ref(oldtok[1], self.filename,
- 0, "function")
- token = self.token()
- elif token[0] == "name":
- token = self.token()
- if token[0] == "sep" and (token[1] == ";" or
- token[1] == "," or token[1] == "="):
- self.index_add_ref(oldtok[1], self.filename,
- 0, "type")
- elif oldtok[0] == "name" and oldtok[1][0:4] == "XEN_":
- self.index_add_ref(oldtok[1], self.filename,
- 0, "typedef")
- elif oldtok[0] == "name" and oldtok[1][0:7] == "LIBXEN_":
- self.index_add_ref(oldtok[1], self.filename,
- 0, "typedef")
-
- else:
- token = self.token()
- return token
+ if token[0] == "sep" and token[1] == "{":
+ token = self.token()
+ token = self.parseBlock(token)
+ elif token[0] == "sep" and token[1] == "}":
+ self.comment = None
+ token = self.token()
+ return token
+ else:
+ if self.collect_ref == 1:
+ oldtok = token
+ token = self.token()
+ if oldtok[0] == "name" and oldtok[1][0:3] == "vir":
+ if token[0] == "sep" and token[1] == "(":
+ self.index_add_ref(oldtok[1], self.filename,
+ 0, "function")
+ token = self.token()
+ elif token[0] == "name":
+ token = self.token()
+ if token[0] == "sep" and (token[1] == ";" or
+ token[1] == "," or token[1] == "="):
+ self.index_add_ref(oldtok[1], self.filename,
+ 0, "type")
+ elif oldtok[0] == "name" and oldtok[1][0:4] == "XEN_":
+ self.index_add_ref(oldtok[1], self.filename,
+ 0, "typedef")
+ elif oldtok[0] == "name" and oldtok[1][0:7] == "LIBXEN_":
+ self.index_add_ref(oldtok[1], self.filename,
+ 0, "typedef")
+
+ else:
+ token = self.token()
+ return token
#
# Parse a C struct definition till the balancing }
#
def parseStruct(self, token):
fields = []
- #self.debug("start parseStruct", token)
+ #self.debug("start parseStruct", token)
while token != None:
- if token[0] == "sep" and token[1] == "{":
- token = self.token()
- token = self.parseTypeBlock(token)
- elif token[0] == "sep" and token[1] == "}":
- self.struct_fields = fields
- #self.debug("end parseStruct", token)
- #print fields
- token = self.token()
- return token
- else:
- base_type = self.type
- #self.debug("before parseType", token)
- token = self.parseType(token)
- #self.debug("after parseType", token)
- if token != None and token[0] == "name":
- fname = token[1]
- token = self.token()
- if token[0] == "sep" and token[1] == ";":
- self.comment = None
- token = self.token()
- fields.append((self.type, fname, self.comment))
- self.comment = None
- else:
- self.error("parseStruct: expecting ;", token)
- elif token != None and token[0] == "sep" and token[1] == "{":
- token = self.token()
- token = self.parseTypeBlock(token)
- if token != None and token[0] == "name":
- token = self.token()
- if token != None and token[0] == "sep" and token[1] == ";":
- token = self.token()
- else:
- self.error("parseStruct: expecting ;", token)
- else:
- self.error("parseStruct: name", token)
- token = self.token()
- self.type = base_type;
+ if token[0] == "sep" and token[1] == "{":
+ token = self.token()
+ token = self.parseTypeBlock(token)
+ elif token[0] == "sep" and token[1] == "}":
+ self.struct_fields = fields
+ #self.debug("end parseStruct", token)
+ #print fields
+ token = self.token()
+ return token
+ else:
+ base_type = self.type
+ #self.debug("before parseType", token)
+ token = self.parseType(token)
+ #self.debug("after parseType", token)
+ if token != None and token[0] == "name":
+ fname = token[1]
+ token = self.token()
+ if token[0] == "sep" and token[1] == ";":
+ self.comment = None
+ token = self.token()
+ fields.append((self.type, fname, self.comment))
+ self.comment = None
+ else:
+ self.error("parseStruct: expecting ;", token)
+ elif token != None and token[0] == "sep" and token[1] == "{":
+ token = self.token()
+ token = self.parseTypeBlock(token)
+ if token != None and token[0] == "name":
+ token = self.token()
+ if token != None and token[0] == "sep" and token[1] == ";":
+ token = self.token()
+ else:
+ self.error("parseStruct: expecting ;", token)
+ else:
+ self.error("parseStruct: name", token)
+ token = self.token()
+ self.type = base_type;
self.struct_fields = fields
- #self.debug("end parseStruct", token)
- #print fields
- return token
+ #self.debug("end parseStruct", token)
+ #print fields
+ return token
#
# Parse a C enum block, parse till the balancing }
#
def parseEnumBlock(self, token):
self.enums = []
- name = None
- self.comment = None
- comment = ""
- value = "0"
+ name = None
+ self.comment = None
+ comment = ""
+ value = "0"
while token != None:
- if token[0] == "sep" and token[1] == "{":
- token = self.token()
- token = self.parseTypeBlock(token)
- elif token[0] == "sep" and token[1] == "}":
- if name != None:
- if self.comment != None:
- comment = self.comment
- self.comment = None
- self.enums.append((name, value, comment))
- token = self.token()
- return token
- elif token[0] == "name":
- if name != None:
- if self.comment != None:
- comment = string.strip(self.comment)
- self.comment = None
- self.enums.append((name, value, comment))
- name = token[1]
- comment = ""
- token = self.token()
- if token[0] == "op" and token[1][0] == "=":
- value = ""
- if len(token[1]) > 1:
- value = token[1][1:]
- token = self.token()
- while token[0] != "sep" or (token[1] != ',' and
- token[1] != '}'):
- value = value + token[1]
- token = self.token()
- else:
- try:
- value = "%d" % (int(value) + 1)
- except:
- self.warning("Failed to compute value of enum %s" % (name))
- value=""
- if token[0] == "sep" and token[1] == ",":
- token = self.token()
- else:
- token = self.token()
- return token
+ if token[0] == "sep" and token[1] == "{":
+ token = self.token()
+ token = self.parseTypeBlock(token)
+ elif token[0] == "sep" and token[1] == "}":
+ if name != None:
+ if self.comment != None:
+ comment = self.comment
+ self.comment = None
+ self.enums.append((name, value, comment))
+ token = self.token()
+ return token
+ elif token[0] == "name":
+ if name != None:
+ if self.comment != None:
+ comment = string.strip(self.comment)
+ self.comment = None
+ self.enums.append((name, value, comment))
+ name = token[1]
+ comment = ""
+ token = self.token()
+ if token[0] == "op" and token[1][0] == "=":
+ value = ""
+ if len(token[1]) > 1:
+ value = token[1][1:]
+ token = self.token()
+ while token[0] != "sep" or (token[1] != ',' and
+ token[1] != '}'):
+ value = value + token[1]
+ token = self.token()
+ else:
+ try:
+ value = "%d" % (int(value) + 1)
+ except:
+ self.warning("Failed to compute value of enum %s" % (name))
+ value=""
+ if token[0] == "sep" and token[1] == ",":
+ token = self.token()
+ else:
+ token = self.token()
+ return token
#
# Parse a C definition block, used for structs it parse till
@@ -1241,15 +1241,15 @@ class CParser:
#
def parseTypeBlock(self, token):
while token != None:
- if token[0] == "sep" and token[1] == "{":
- token = self.token()
- token = self.parseTypeBlock(token)
- elif token[0] == "sep" and token[1] == "}":
- token = self.token()
- return token
- else:
- token = self.token()
- return token
+ if token[0] == "sep" and token[1] == "{":
+ token = self.token()
+ token = self.parseTypeBlock(token)
+ elif token[0] == "sep" and token[1] == "}":
+ token = self.token()
+ return token
+ else:
+ token = self.token()
+ return token
#
# Parse a type: the fact that the type name can either occur after
@@ -1258,221 +1258,221 @@ class CParser:
#
def parseType(self, token):
self.type = ""
- self.struct_fields = []
+ self.struct_fields = []
self.signature = None
- if token == None:
- return token
-
- while token[0] == "name" and (
- token[1] == "const" or \
- token[1] == "unsigned" or \
- token[1] == "signed"):
- if self.type == "":
- self.type = token[1]
- else:
- self.type = self.type + " " + token[1]
- token = self.token()
+ if token == None:
+ return token
+
+ while token[0] == "name" and (
+ token[1] == "const" or \
+ token[1] == "unsigned" or \
+ token[1] == "signed"):
+ if self.type == "":
+ self.type = token[1]
+ else:
+ self.type = self.type + " " + token[1]
+ token = self.token()
if token[0] == "name" and token[1] == "long":
- if self.type == "":
- self.type = token[1]
- else:
- self.type = self.type + " " + token[1]
-
- # some read ahead for long long
- oldtmp = token
- token = self.token()
- if token[0] == "name" and token[1] == "long":
- self.type = self.type + " " + token[1]
- else:
- self.push(token)
- token = oldtmp
-
- if token[0] == "name" and token[1] == "int":
- if self.type == "":
- self.type = tmp[1]
- else:
- self.type = self.type + " " + tmp[1]
+ if self.type == "":
+ self.type = token[1]
+ else:
+ self.type = self.type + " " + token[1]
+
+ # some read ahead for long long
+ oldtmp = token
+ token = self.token()
+ if token[0] == "name" and token[1] == "long":
+ self.type = self.type + " " + token[1]
+ else:
+ self.push(token)
+ token = oldtmp
+
+ if token[0] == "name" and token[1] == "int":
+ if self.type == "":
+ self.type = tmp[1]
+ else:
+ self.type = self.type + " " + tmp[1]
elif token[0] == "name" and token[1] == "short":
- if self.type == "":
- self.type = token[1]
- else:
- self.type = self.type + " " + token[1]
- if token[0] == "name" and token[1] == "int":
- if self.type == "":
- self.type = tmp[1]
- else:
- self.type = self.type + " " + tmp[1]
+ if self.type == "":
+ self.type = token[1]
+ else:
+ self.type = self.type + " " + token[1]
+ if token[0] == "name" and token[1] == "int":
+ if self.type == "":
+ self.type = tmp[1]
+ else:
+ self.type = self.type + " " + tmp[1]
elif token[0] == "name" and token[1] == "struct":
- if self.type == "":
- self.type = token[1]
- else:
- self.type = self.type + " " + token[1]
- token = self.token()
- nametok = None
- if token[0] == "name":
- nametok = token
- token = self.token()
- if token != None and token[0] == "sep" and token[1] == "{":
- token = self.token()
- token = self.parseStruct(token)
- elif token != None and token[0] == "op" and token[1] == "*":
- self.type = self.type + " " + nametok[1] + " *"
- token = self.token()
- while token != None and token[0] == "op" and token[1] == "*":
- self.type = self.type + " *"
- token = self.token()
- if token[0] == "name":
- nametok = token
- token = self.token()
- else:
- self.error("struct : expecting name", token)
- return token
- elif token != None and token[0] == "name" and nametok != None:
- self.type = self.type + " " + nametok[1]
- return token
-
- if nametok != None:
- self.lexer.push(token)
- token = nametok
- return token
+ if self.type == "":
+ self.type = token[1]
+ else:
+ self.type = self.type + " " + token[1]
+ token = self.token()
+ nametok = None
+ if token[0] == "name":
+ nametok = token
+ token = self.token()
+ if token != None and token[0] == "sep" and token[1] == "{":
+ token = self.token()
+ token = self.parseStruct(token)
+ elif token != None and token[0] == "op" and token[1] == "*":
+ self.type = self.type + " " + nametok[1] + " *"
+ token = self.token()
+ while token != None and token[0] == "op" and token[1] == "*":
+ self.type = self.type + " *"
+ token = self.token()
+ if token[0] == "name":
+ nametok = token
+ token = self.token()
+ else:
+ self.error("struct : expecting name", token)
+ return token
+ elif token != None and token[0] == "name" and nametok != None:
+ self.type = self.type + " " + nametok[1]
+ return token
+
+ if nametok != None:
+ self.lexer.push(token)
+ token = nametok
+ return token
elif token[0] == "name" and token[1] == "enum":
- if self.type == "":
- self.type = token[1]
- else:
- self.type = self.type + " " + token[1]
- self.enums = []
- token = self.token()
- if token != None and token[0] == "sep" and token[1] == "{":
- token = self.token()
- token = self.parseEnumBlock(token)
- else:
- self.error("parsing enum: expecting '{'", token)
- enum_type = None
- if token != None and token[0] != "name":
- self.lexer.push(token)
- token = ("name", "enum")
- else:
- enum_type = token[1]
- for enum in self.enums:
- self.index_add(enum[0], self.filename,
- not self.is_header, "enum",
- (enum[1], enum[2], enum_type))
- return token
-
- elif token[0] == "name":
- if self.type == "":
- self.type = token[1]
- else:
- self.type = self.type + " " + token[1]
- else:
- self.error("parsing type %s: expecting a name" % (self.type),
- token)
- return token
- token = self.token()
+ if self.type == "":
+ self.type = token[1]
+ else:
+ self.type = self.type + " " + token[1]
+ self.enums = []
+ token = self.token()
+ if token != None and token[0] == "sep" and token[1] == "{":
+ token = self.token()
+ token = self.parseEnumBlock(token)
+ else:
+ self.error("parsing enum: expecting '{'", token)
+ enum_type = None
+ if token != None and token[0] != "name":
+ self.lexer.push(token)
+ token = ("name", "enum")
+ else:
+ enum_type = token[1]
+ for enum in self.enums:
+ self.index_add(enum[0], self.filename,
+ not self.is_header, "enum",
+ (enum[1], enum[2], enum_type))
+ return token
+
+ elif token[0] == "name":
+ if self.type == "":
+ self.type = token[1]
+ else:
+ self.type = self.type + " " + token[1]
+ else:
+ self.error("parsing type %s: expecting a name" % (self.type),
+ token)
+ return token
+ token = self.token()
while token != None and (token[0] == "op" or
- token[0] == "name" and token[1] == "const"):
- self.type = self.type + " " + token[1]
- token = self.token()
-
- #
- # if there is a parenthesis here, this means a function type
- #
- if token != None and token[0] == "sep" and token[1] == '(':
- self.type = self.type + token[1]
- token = self.token()
- while token != None and token[0] == "op" and token[1] == '*':
- self.type = self.type + token[1]
- token = self.token()
- if token == None or token[0] != "name" :
- self.error("parsing function type, name expected", token);
- return token
- self.type = self.type + token[1]
- nametok = token
- token = self.token()
- if token != None and token[0] == "sep" and token[1] == ')':
- self.type = self.type + token[1]
- token = self.token()
- if token != None and token[0] == "sep" and token[1] == '(':
- token = self.token()
- type = self.type;
- token = self.parseSignature(token);
- self.type = type;
- else:
- self.error("parsing function type, '(' expected", token);
- return token
- else:
- self.error("parsing function type, ')' expected", token);
- return token
- self.lexer.push(token)
- token = nametok
- return token
+ token[0] == "name" and token[1] == "const"):
+ self.type = self.type + " " + token[1]
+ token = self.token()
#
- # do some lookahead for arrays
- #
- if token != None and token[0] == "name":
- nametok = token
- token = self.token()
- if token != None and token[0] == "sep" and token[1] == '[':
- self.type = self.type + nametok[1]
- while token != None and token[0] == "sep" and token[1] == '[':
- self.type = self.type + token[1]
- token = self.token()
- while token != None and token[0] != 'sep' and \
- token[1] != ']' and token[1] != ';':
- self.type = self.type + token[1]
- token = self.token()
- if token != None and token[0] == 'sep' and token[1] == ']':
- self.type = self.type + token[1]
- token = self.token()
- else:
- self.error("parsing array type, ']' expected", token);
- return token
- elif token != None and token[0] == "sep" and token[1] == ':':
- # remove :12 in case it's a limited int size
- token = self.token()
- token = self.token()
- self.lexer.push(token)
- token = nametok
-
- return token
+ # if there is a parenthesis here, this means a function type
+ #
+ if token != None and token[0] == "sep" and token[1] == '(':
+ self.type = self.type + token[1]
+ token = self.token()
+ while token != None and token[0] == "op" and token[1] == '*':
+ self.type = self.type + token[1]
+ token = self.token()
+ if token == None or token[0] != "name" :
+ self.error("parsing function type, name expected", token);
+ return token
+ self.type = self.type + token[1]
+ nametok = token
+ token = self.token()
+ if token != None and token[0] == "sep" and token[1] == ')':
+ self.type = self.type + token[1]
+ token = self.token()
+ if token != None and token[0] == "sep" and token[1] == '(':
+ token = self.token()
+ type = self.type;
+ token = self.parseSignature(token);
+ self.type = type;
+ else:
+ self.error("parsing function type, '(' expected", token);
+ return token
+ else:
+ self.error("parsing function type, ')' expected", token);
+ return token
+ self.lexer.push(token)
+ token = nametok
+ return token
+
+ #
+ # do some lookahead for arrays
+ #
+ if token != None and token[0] == "name":
+ nametok = token
+ token = self.token()
+ if token != None and token[0] == "sep" and token[1] == '[':
+ self.type = self.type + nametok[1]
+ while token != None and token[0] == "sep" and token[1] == '[':
+ self.type = self.type + token[1]
+ token = self.token()
+ while token != None and token[0] != 'sep' and \
+ token[1] != ']' and token[1] != ';':
+ self.type = self.type + token[1]
+ token = self.token()
+ if token != None and token[0] == 'sep' and token[1] == ']':
+ self.type = self.type + token[1]
+ token = self.token()
+ else:
+ self.error("parsing array type, ']' expected", token);
+ return token
+ elif token != None and token[0] == "sep" and token[1] == ':':
+ # remove :12 in case it's a limited int size
+ token = self.token()
+ token = self.token()
+ self.lexer.push(token)
+ token = nametok
+
+ return token
#
# Parse a signature: '(' has been parsed and we scan the type definition
# up to the ')' included
def parseSignature(self, token):
signature = []
- if token != None and token[0] == "sep" and token[1] == ')':
- self.signature = []
- token = self.token()
- return token
- while token != None:
- token = self.parseType(token)
- if token != None and token[0] == "name":
- signature.append((self.type, token[1], None))
- token = self.token()
- elif token != None and token[0] == "sep" and token[1] == ',':
- token = self.token()
- continue
- elif token != None and token[0] == "sep" and token[1] == ')':
- # only the type was provided
- if self.type == "...":
- signature.append((self.type, "...", None))
- else:
- signature.append((self.type, None, None))
- if token != None and token[0] == "sep":
- if token[1] == ',':
- token = self.token()
- continue
- elif token[1] == ')':
- token = self.token()
- break
- self.signature = signature
- return token
+ if token != None and token[0] == "sep" and token[1] == ')':
+ self.signature = []
+ token = self.token()
+ return token
+ while token != None:
+ token = self.parseType(token)
+ if token != None and token[0] == "name":
+ signature.append((self.type, token[1], None))
+ token = self.token()
+ elif token != None and token[0] == "sep" and token[1] == ',':
+ token = self.token()
+ continue
+ elif token != None and token[0] == "sep" and token[1] == ')':
+ # only the type was provided
+ if self.type == "...":
+ signature.append((self.type, "...", None))
+ else:
+ signature.append((self.type, None, None))
+ if token != None and token[0] == "sep":
+ if token[1] == ',':
+ token = self.token()
+ continue
+ elif token[1] == ')':
+ token = self.token()
+ break
+ self.signature = signature
+ return token
#
# Parse a global definition, be it a type, variable or function
@@ -1481,134 +1481,134 @@ class CParser:
def parseGlobal(self, token):
static = 0
if token[1] == 'extern':
- token = self.token()
- if token == None:
- return token
- if token[0] == 'string':
- if token[1] == 'C':
- token = self.token()
- if token == None:
- return token
- if token[0] == 'sep' and token[1] == "{":
- token = self.token()
-# print 'Entering extern "C line ', self.lineno()
- while token != None and (token[0] != 'sep' or
- token[1] != "}"):
- if token[0] == 'name':
- token = self.parseGlobal(token)
- else:
- self.error(
- "token %s %s unexpected at the top level" % (
- token[0], token[1]))
- token = self.parseGlobal(token)
-# print 'Exiting extern "C" line', self.lineno()
- token = self.token()
- return token
- else:
- return token
- elif token[1] == 'static':
- static = 1
- token = self.token()
- if token == None or token[0] != 'name':
- return token
-
- if token[1] == 'typedef':
- token = self.token()
- return self.parseTypedef(token)
- else:
- token = self.parseType(token)
- type_orig = self.type
- if token == None or token[0] != "name":
- return token
- type = type_orig
- self.name = token[1]
- token = self.token()
- while token != None and (token[0] == "sep" or token[0] == "op"):
- if token[0] == "sep":
- if token[1] == "[":
- type = type + token[1]
- token = self.token()
- while token != None and (token[0] != "sep" or \
- token[1] != ";"):
- type = type + token[1]
- token = self.token()
-
- if token != None and token[0] == "op" and token[1] == "=":
- #
- # Skip the initialization of the variable
- #
- token = self.token()
- if token[0] == 'sep' and token[1] == '{':
- token = self.token()
- token = self.parseBlock(token)
- else:
- self.comment = None
- while token != None and (token[0] != "sep" or \
- (token[1] != ';' and token[1] != ',')):
- token = self.token()
- self.comment = None
- if token == None or token[0] != "sep" or (token[1] != ';' and
- token[1] != ','):
- self.error("missing ';' or ',' after value")
-
- if token != None and token[0] == "sep":
- if token[1] == ";":
- self.comment = None
- token = self.token()
- if type == "struct":
- self.index_add(self.name, self.filename,
- not self.is_header, "struct", self.struct_fields)
- else:
- self.index_add(self.name, self.filename,
- not self.is_header, "variable", type)
- break
- elif token[1] == "(":
- token = self.token()
- token = self.parseSignature(token)
- if token == None:
- return None
- if token[0] == "sep" and token[1] == ";":
- d = self.mergeFunctionComment(self.name,
- ((type, None), self.signature), 1)
- self.index_add(self.name, self.filename, static,
- "function", d)
- token = self.token()
- elif token[0] == "sep" and token[1] == "{":
- d = self.mergeFunctionComment(self.name,
- ((type, None), self.signature), static)
- self.index_add(self.name, self.filename, static,
- "function", d)
- token = self.token()
- token = self.parseBlock(token);
- elif token[1] == ',':
- self.comment = None
- self.index_add(self.name, self.filename, static,
- "variable", type)
- type = type_orig
- token = self.token()
- while token != None and token[0] == "sep":
- type = type + token[1]
- token = self.token()
- if token != None and token[0] == "name":
- self.name = token[1]
- token = self.token()
- else:
- break
-
- return token
+ token = self.token()
+ if token == None:
+ return token
+ if token[0] == 'string':
+ if token[1] == 'C':
+ token = self.token()
+ if token == None:
+ return token
+ if token[0] == 'sep' and token[1] == "{":
+ token = self.token()
+# print 'Entering extern "C line ', self.lineno()
+ while token != None and (token[0] != 'sep' or
+ token[1] != "}"):
+ if token[0] == 'name':
+ token = self.parseGlobal(token)
+ else:
+ self.error(
+ "token %s %s unexpected at the top level" % (
+ token[0], token[1]))
+ token = self.parseGlobal(token)
+# print 'Exiting extern "C" line', self.lineno()
+ token = self.token()
+ return token
+ else:
+ return token
+ elif token[1] == 'static':
+ static = 1
+ token = self.token()
+ if token == None or token[0] != 'name':
+ return token
+
+ if token[1] == 'typedef':
+ token = self.token()
+ return self.parseTypedef(token)
+ else:
+ token = self.parseType(token)
+ type_orig = self.type
+ if token == None or token[0] != "name":
+ return token
+ type = type_orig
+ self.name = token[1]
+ token = self.token()
+ while token != None and (token[0] == "sep" or token[0] == "op"):
+ if token[0] == "sep":
+ if token[1] == "[":
+ type = type + token[1]
+ token = self.token()
+ while token != None and (token[0] != "sep" or \
+ token[1] != ";"):
+ type = type + token[1]
+ token = self.token()
+
+ if token != None and token[0] == "op" and token[1] == "=":
+ #
+ # Skip the initialization of the variable
+ #
+ token = self.token()
+ if token[0] == 'sep' and token[1] == '{':
+ token = self.token()
+ token = self.parseBlock(token)
+ else:
+ self.comment = None
+ while token != None and (token[0] != "sep" or \
+ (token[1] != ';' and token[1] != ',')):
+ token = self.token()
+ self.comment = None
+ if token == None or token[0] != "sep" or (token[1] != ';' and
+ token[1] != ','):
+ self.error("missing ';' or ',' after value")
+
+ if token != None and token[0] == "sep":
+ if token[1] == ";":
+ self.comment = None
+ token = self.token()
+ if type == "struct":
+ self.index_add(self.name, self.filename,
+ not self.is_header, "struct", self.struct_fields)
+ else:
+ self.index_add(self.name, self.filename,
+ not self.is_header, "variable", type)
+ break
+ elif token[1] == "(":
+ token = self.token()
+ token = self.parseSignature(token)
+ if token == None:
+ return None
+ if token[0] == "sep" and token[1] == ";":
+ d = self.mergeFunctionComment(self.name,
+ ((type, None), self.signature), 1)
+ self.index_add(self.name, self.filename, static,
+ "function", d)
+ token = self.token()
+ elif token[0] == "sep" and token[1] == "{":
+ d = self.mergeFunctionComment(self.name,
+ ((type, None), self.signature), static)
+ self.index_add(self.name, self.filename, static,
+ "function", d)
+ token = self.token()
+ token = self.parseBlock(token);
+ elif token[1] == ',':
+ self.comment = None
+ self.index_add(self.name, self.filename, static,
+ "variable", type)
+ type = type_orig
+ token = self.token()
+ while token != None and token[0] == "sep":
+ type = type + token[1]
+ token = self.token()
+ if token != None and token[0] == "name":
+ self.name = token[1]
+ token = self.token()
+ else:
+ break
+
+ return token
def parse(self):
self.warning("Parsing %s" % (self.filename))
token = self.token()
- while token != None:
+ while token != None:
if token[0] == 'name':
- token = self.parseGlobal(token)
+ token = self.parseGlobal(token)
else:
- self.error("token %s %s unexpected at the top level" % (
- token[0], token[1]))
- token = self.parseGlobal(token)
- return
- self.parseTopComment(self.top_comment)
+ self.error("token %s %s unexpected at the top level" % (
+ token[0], token[1]))
+ token = self.parseGlobal(token)
+ return
+ self.parseTopComment(self.top_comment)
return self.index
@@ -1618,449 +1618,449 @@ class docBuilder:
self.name = name
self.path = path
self.directories = directories
- self.includes = includes + included_files.keys()
- self.modules = {}
- self.headers = {}
- self.idx = index()
+ self.includes = includes + included_files.keys()
+ self.modules = {}
+ self.headers = {}
+ self.idx = index()
self.xref = {}
- self.index = {}
- self.basename = name
+ self.index = {}
+ self.basename = name
def indexString(self, id, str):
- if str == None:
- return
- str = string.replace(str, "'", ' ')
- str = string.replace(str, '"', ' ')
- str = string.replace(str, "/", ' ')
- str = string.replace(str, '*', ' ')
- str = string.replace(str, "[", ' ')
- str = string.replace(str, "]", ' ')
- str = string.replace(str, "(", ' ')
- str = string.replace(str, ")", ' ')
- str = string.replace(str, "<", ' ')
- str = string.replace(str, '>', ' ')
- str = string.replace(str, "&", ' ')
- str = string.replace(str, '#', ' ')
- str = string.replace(str, ",", ' ')
- str = string.replace(str, '.', ' ')
- str = string.replace(str, ';', ' ')
- tokens = string.split(str)
- for token in tokens:
- try:
- c = token[0]
- if string.find(string.letters, c) < 0:
- pass
- elif len(token) < 3:
- pass
- else:
- lower = string.lower(token)
- # TODO: generalize this a bit
- if lower == 'and' or lower == 'the':
- pass
- elif self.xref.has_key(token):
- self.xref[token].append(id)
- else:
- self.xref[token] = [id]
- except:
- pass
+ if str == None:
+ return
+ str = string.replace(str, "'", ' ')
+ str = string.replace(str, '"', ' ')
+ str = string.replace(str, "/", ' ')
+ str = string.replace(str, '*', ' ')
+ str = string.replace(str, "[", ' ')
+ str = string.replace(str, "]", ' ')
+ str = string.replace(str, "(", ' ')
+ str = string.replace(str, ")", ' ')
+ str = string.replace(str, "<", ' ')
+ str = string.replace(str, '>', ' ')
+ str = string.replace(str, "&", ' ')
+ str = string.replace(str, '#', ' ')
+ str = string.replace(str, ",", ' ')
+ str = string.replace(str, '.', ' ')
+ str = string.replace(str, ';', ' ')
+ tokens = string.split(str)
+ for token in tokens:
+ try:
+ c = token[0]
+ if string.find(string.letters, c) < 0:
+ pass
+ elif len(token) < 3:
+ pass
+ else:
+ lower = string.lower(token)
+ # TODO: generalize this a bit
+ if lower == 'and' or lower == 'the':
+ pass
+ elif self.xref.has_key(token):
+ self.xref[token].append(id)
+ else:
+ self.xref[token] = [id]
+ except:
+ pass
def analyze(self):
print "Project %s : %d headers, %d modules" % (self.name, len(self.headers.keys()), len(self.modules.keys()))
- self.idx.analyze()
+ self.idx.analyze()
def scanHeaders(self):
- for header in self.headers.keys():
- parser = CParser(header)
- idx = parser.parse()
- self.headers[header] = idx;
- self.idx.merge(idx)
+ for header in self.headers.keys():
+ parser = CParser(header)
+ idx = parser.parse()
+ self.headers[header] = idx;
+ self.idx.merge(idx)
def scanModules(self):
- for module in self.modules.keys():
- parser = CParser(module)
- idx = parser.parse()
- # idx.analyze()
- self.modules[module] = idx
- self.idx.merge_public(idx)
+ for module in self.modules.keys():
+ parser = CParser(module)
+ idx = parser.parse()
+ # idx.analyze()
+ self.modules[module] = idx
+ self.idx.merge_public(idx)
def scan(self):
for directory in self.directories:
- files = glob.glob(directory + "/*.c")
- for file in files:
- skip = 1
- for incl in self.includes:
- if string.find(file, incl) != -1:
- skip = 0;
- break
- if skip == 0:
- self.modules[file] = None;
- files = glob.glob(directory + "/*.h")
- for file in files:
- skip = 1
- for incl in self.includes:
- if string.find(file, incl) != -1:
- skip = 0;
- break
- if skip == 0:
- self.headers[file] = None;
- self.scanHeaders()
- self.scanModules()
+ files = glob.glob(directory + "/*.c")
+ for file in files:
+ skip = 1
+ for incl in self.includes:
+ if string.find(file, incl) != -1:
+ skip = 0;
+ break
+ if skip == 0:
+ self.modules[file] = None;
+ files = glob.glob(directory + "/*.h")
+ for file in files:
+ skip = 1
+ for incl in self.includes:
+ if string.find(file, incl) != -1:
+ skip = 0;
+ break
+ if skip == 0:
+ self.headers[file] = None;
+ self.scanHeaders()
+ self.scanModules()
def modulename_file(self, file):
module = os.path.basename(file)
- if module[-2:] == '.h':
- module = module[:-2]
- elif module[-2:] == '.c':
- module = module[:-2]
- return module
+ if module[-2:] == '.h':
+ module = module[:-2]
+ elif module[-2:] == '.c':
+ module = module[:-2]
+ return module
def serialize_enum(self, output, name):
id = self.idx.enums[name]
output.write(" <enum name='%s' file='%s'" % (name,
- self.modulename_file(id.header)))
- if id.info != None:
- info = id.info
- if info[0] != None and info[0] != '':
- try:
- val = eval(info[0])
- except:
- val = info[0]
- output.write(" value='%s'" % (val));
- if info[2] != None and info[2] != '':
- output.write(" type='%s'" % info[2]);
- if info[1] != None and info[1] != '':
- output.write(" info='%s'" % escape(info[1]));
+ self.modulename_file(id.header)))
+ if id.info != None:
+ info = id.info
+ if info[0] != None and info[0] != '':
+ try:
+ val = eval(info[0])
+ except:
+ val = info[0]
+ output.write(" value='%s'" % (val));
+ if info[2] != None and info[2] != '':
+ output.write(" type='%s'" % info[2]);
+ if info[1] != None and info[1] != '':
+ output.write(" info='%s'" % escape(info[1]));
output.write("/>\n")
def serialize_macro(self, output, name):
id = self.idx.macros[name]
output.write(" <macro name='%s' file='%s'>\n" % (name,
- self.modulename_file(id.header)))
- if id.info != None:
+ self.modulename_file(id.header)))
+ if id.info != None:
try:
- (args, desc) = id.info
- if desc != None and desc != "":
- output.write(" <info><![CDATA[%s]]></info>\n" % (desc))
- self.indexString(name, desc)
- for arg in args:
- (name, desc) = arg
- if desc != None and desc != "":
- output.write(" <arg name='%s' info='%s'/>\n" % (
- name, escape(desc)))
- self.indexString(name, desc)
- else:
- output.write(" <arg name='%s'/>\n" % (name))
+ (args, desc) = id.info
+ if desc != None and desc != "":
+ output.write(" <info><![CDATA[%s]]></info>\n" % (desc))
+ self.indexString(name, desc)
+ for arg in args:
+ (name, desc) = arg
+ if desc != None and desc != "":
+ output.write(" <arg name='%s' info='%s'/>\n" % (
+ name, escape(desc)))
+ self.indexString(name, desc)
+ else:
+ output.write(" <arg name='%s'/>\n" % (name))
except:
pass
output.write(" </macro>\n")
def serialize_typedef(self, output, name):
id = self.idx.typedefs[name]
- if id.info[0:7] == 'struct ':
- output.write(" <struct name='%s' file='%s' type='%s'" % (
- name, self.modulename_file(id.header), id.info))
- name = id.info[7:]
- if self.idx.structs.has_key(name) and ( \
- type(self.idx.structs[name].info) == type(()) or
- type(self.idx.structs[name].info) == type([])):
- output.write(">\n");
- try:
- for field in self.idx.structs[name].info:
- desc = field[2]
- self.indexString(name, desc)
- if desc == None:
- desc = ''
- else:
- desc = escape(desc)
- output.write(" <field name='%s' type='%s' info='%s'/>\n" % (field[1] , field[0], desc))
- except:
- print "Failed to serialize struct %s" % (name)
- output.write(" </struct>\n")
- else:
- output.write("/>\n");
- else :
- output.write(" <typedef name='%s' file='%s' type='%s'" % (
- name, self.modulename_file(id.header), id.info))
+ if id.info[0:7] == 'struct ':
+ output.write(" <struct name='%s' file='%s' type='%s'" % (
+ name, self.modulename_file(id.header), id.info))
+ name = id.info[7:]
+ if self.idx.structs.has_key(name) and ( \
+ type(self.idx.structs[name].info) == type(()) or
+ type(self.idx.structs[name].info) == type([])):
+ output.write(">\n");
+ try:
+ for field in self.idx.structs[name].info:
+ desc = field[2]
+ self.indexString(name, desc)
+ if desc == None:
+ desc = ''
+ else:
+ desc = escape(desc)
+ output.write(" <field name='%s' type='%s' info='%s'/>\n" % (field[1] , field[0], desc))
+ except:
+ print "Failed to serialize struct %s" % (name)
+ output.write(" </struct>\n")
+ else:
+ output.write("/>\n");
+ else :
+ output.write(" <typedef name='%s' file='%s' type='%s'" % (
+ name, self.modulename_file(id.header), id.info))
try:
- desc = id.extra
- if desc != None and desc != "":
- output.write(">\n <info><![CDATA[%s]]></info>\n" % (desc))
- output.write(" </typedef>\n")
- else:
- output.write("/>\n")
- except:
- output.write("/>\n")
+ desc = id.extra
+ if desc != None and desc != "":
+ output.write(">\n <info><![CDATA[%s]]></info>\n" % (desc))
+ output.write(" </typedef>\n")
+ else:
+ output.write("/>\n")
+ except:
+ output.write("/>\n")
def serialize_variable(self, output, name):
id = self.idx.variables[name]
- if id.info != None:
- output.write(" <variable name='%s' file='%s' type='%s'/>\n" % (
- name, self.modulename_file(id.header), id.info))
- else:
- output.write(" <variable name='%s' file='%s'/>\n" % (
- name, self.modulename_file(id.header)))
+ if id.info != None:
+ output.write(" <variable name='%s' file='%s' type='%s'/>\n" % (
+ name, self.modulename_file(id.header), id.info))
+ else:
+ output.write(" <variable name='%s' file='%s'/>\n" % (
+ name, self.modulename_file(id.header)))
def serialize_function(self, output, name):
id = self.idx.functions[name]
- if name == debugsym:
- print "=>", id
+ if name == debugsym:
+ print "=>", id
output.write(" <%s name='%s' file='%s' module='%s'>\n" % (id.type,
- name, self.modulename_file(id.header),
- self.modulename_file(id.module)))
- #
- # Processing of conditionals modified by Bill 1/1/05
- #
- if id.conditionals != None:
- apstr = ""
- for cond in id.conditionals:
- if apstr != "":
- apstr = apstr + " && "
- apstr = apstr + cond
- output.write(" <cond>%s</cond>\n"% (apstr));
- try:
- (ret, params, desc) = id.info
- output.write(" <info><![CDATA[%s]]></info>\n" % (desc))
- self.indexString(name, desc)
- if ret[0] != None:
- if ret[0] == "void":
- output.write(" <return type='void'/>\n")
- else:
- output.write(" <return type='%s' info='%s'/>\n" % (
- ret[0], escape(ret[1])))
- self.indexString(name, ret[1])
- for param in params:
- if param[0] == 'void':
- continue
- if param[2] == None:
- output.write(" <arg name='%s' type='%s' info=''/>\n" % (param[1], param[0]))
- else:
- output.write(" <arg name='%s' type='%s' info='%s'/>\n" % (param[1], param[0], escape(param[2])))
- self.indexString(name, param[2])
- except:
- print "Failed to save function %s info: " % name, `id.info`
+ name, self.modulename_file(id.header),
+ self.modulename_file(id.module)))
+ #
+ # Processing of conditionals modified by Bill 1/1/05
+ #
+ if id.conditionals != None:
+ apstr = ""
+ for cond in id.conditionals:
+ if apstr != "":
+ apstr = apstr + " && "
+ apstr = apstr + cond
+ output.write(" <cond>%s</cond>\n"% (apstr));
+ try:
+ (ret, params, desc) = id.info
+ output.write(" <info><![CDATA[%s]]></info>\n" % (desc))
+ self.indexString(name, desc)
+ if ret[0] != None:
+ if ret[0] == "void":
+ output.write(" <return type='void'/>\n")
+ else:
+ output.write(" <return type='%s' info='%s'/>\n" % (
+ ret[0], escape(ret[1])))
+ self.indexString(name, ret[1])
+ for param in params:
+ if param[0] == 'void':
+ continue
+ if param[2] == None:
+ output.write(" <arg name='%s' type='%s' info=''/>\n" % (param[1], param[0]))
+ else:
+ output.write(" <arg name='%s' type='%s' info='%s'/>\n" % (param[1], param[0], escape(param[2])))
+ self.indexString(name, param[2])
+ except:
+ print "Failed to save function %s info: " % name, `id.info`
output.write(" </%s>\n" % (id.type))
def serialize_exports(self, output, file):
module = self.modulename_file(file)
- output.write(" <file name='%s'>\n" % (module))
- dict = self.headers[file]
- if dict.info != None:
- for data in ('Summary', 'Description', 'Author'):
- try:
- output.write(" <%s>%s</%s>\n" % (
- string.lower(data),
- escape(dict.info[data]),
- string.lower(data)))
- except:
- print "Header %s lacks a %s description" % (module, data)
- if dict.info.has_key('Description'):
- desc = dict.info['Description']
- if string.find(desc, "DEPRECATED") != -1:
- output.write(" <deprecated/>\n")
+ output.write(" <file name='%s'>\n" % (module))
+ dict = self.headers[file]
+ if dict.info != None:
+ for data in ('Summary', 'Description', 'Author'):
+ try:
+ output.write(" <%s>%s</%s>\n" % (
+ string.lower(data),
+ escape(dict.info[data]),
+ string.lower(data)))
+ except:
+ print "Header %s lacks a %s description" % (module, data)
+ if dict.info.has_key('Description'):
+ desc = dict.info['Description']
+ if string.find(desc, "DEPRECATED") != -1:
+ output.write(" <deprecated/>\n")
ids = dict.macros.keys()
- ids.sort()
- for id in uniq(ids):
- # Macros are sometime used to masquerade other types.
- if dict.functions.has_key(id):
- continue
- if dict.variables.has_key(id):
- continue
- if dict.typedefs.has_key(id):
- continue
- if dict.structs.has_key(id):
- continue
- if dict.enums.has_key(id):
- continue
- output.write(" <exports symbol='%s' type='macro'/>\n" % (id))
+ ids.sort()
+ for id in uniq(ids):
+ # Macros are sometime used to masquerade other types.
+ if dict.functions.has_key(id):
+ continue
+ if dict.variables.has_key(id):
+ continue
+ if dict.typedefs.has_key(id):
+ continue
+ if dict.structs.has_key(id):
+ continue
+ if dict.enums.has_key(id):
+ continue
+ output.write(" <exports symbol='%s' type='macro'/>\n" % (id))
ids = dict.enums.keys()
- ids.sort()
- for id in uniq(ids):
- output.write(" <exports symbol='%s' type='enum'/>\n" % (id))
+ ids.sort()
+ for id in uniq(ids):
+ output.write(" <exports symbol='%s' type='enum'/>\n" % (id))
ids = dict.typedefs.keys()
- ids.sort()
- for id in uniq(ids):
- output.write(" <exports symbol='%s' type='typedef'/>\n" % (id))
+ ids.sort()
+ for id in uniq(ids):
+ output.write(" <exports symbol='%s' type='typedef'/>\n" % (id))
ids = dict.structs.keys()
- ids.sort()
- for id in uniq(ids):
- output.write(" <exports symbol='%s' type='struct'/>\n" % (id))
+ ids.sort()
+ for id in uniq(ids):
+ output.write(" <exports symbol='%s' type='struct'/>\n" % (id))
ids = dict.variables.keys()
- ids.sort()
- for id in uniq(ids):
- output.write(" <exports symbol='%s' type='variable'/>\n" % (id))
+ ids.sort()
+ for id in uniq(ids):
+ output.write(" <exports symbol='%s' type='variable'/>\n" % (id))
ids = dict.functions.keys()
- ids.sort()
- for id in uniq(ids):
- output.write(" <exports symbol='%s' type='function'/>\n" % (id))
- output.write(" </file>\n")
+ ids.sort()
+ for id in uniq(ids):
+ output.write(" <exports symbol='%s' type='function'/>\n" % (id))
+ output.write(" </file>\n")
def serialize_xrefs_files(self, output):
headers = self.headers.keys()
headers.sort()
for file in headers:
- module = self.modulename_file(file)
- output.write(" <file name='%s'>\n" % (module))
- dict = self.headers[file]
- ids = uniq(dict.functions.keys() + dict.variables.keys() + \
- dict.macros.keys() + dict.typedefs.keys() + \
- dict.structs.keys() + dict.enums.keys())
- ids.sort()
- for id in ids:
- output.write(" <ref name='%s'/>\n" % (id))
- output.write(" </file>\n")
+ module = self.modulename_file(file)
+ output.write(" <file name='%s'>\n" % (module))
+ dict = self.headers[file]
+ ids = uniq(dict.functions.keys() + dict.variables.keys() + \
+ dict.macros.keys() + dict.typedefs.keys() + \
+ dict.structs.keys() + dict.enums.keys())
+ ids.sort()
+ for id in ids:
+ output.write(" <ref name='%s'/>\n" % (id))
+ output.write(" </file>\n")
pass
def serialize_xrefs_functions(self, output):
funcs = {}
- for name in self.idx.functions.keys():
- id = self.idx.functions[name]
- try:
- (ret, params, desc) = id.info
- for param in params:
- if param[0] == 'void':
- continue
- if funcs.has_key(param[0]):
- funcs[param[0]].append(name)
- else:
- funcs[param[0]] = [name]
- except:
- pass
- typ = funcs.keys()
- typ.sort()
- for type in typ:
- if type == '' or type == 'void' or type == "int" or \
- type == "char *" or type == "const char *" :
- continue
- output.write(" <type name='%s'>\n" % (type))
- ids = funcs[type]
- ids.sort()
- pid = '' # not sure why we have dups, but get rid of them!
- for id in ids:
- if id != pid:
- output.write(" <ref name='%s'/>\n" % (id))
- pid = id
- output.write(" </type>\n")
+ for name in self.idx.functions.keys():
+ id = self.idx.functions[name]
+ try:
+ (ret, params, desc) = id.info
+ for param in params:
+ if param[0] == 'void':
+ continue
+ if funcs.has_key(param[0]):
+ funcs[param[0]].append(name)
+ else:
+ funcs[param[0]] = [name]
+ except:
+ pass
+ typ = funcs.keys()
+ typ.sort()
+ for type in typ:
+ if type == '' or type == 'void' or type == "int" or \
+ type == "char *" or type == "const char *" :
+ continue
+ output.write(" <type name='%s'>\n" % (type))
+ ids = funcs[type]
+ ids.sort()
+ pid = '' # not sure why we have dups, but get rid of them!
+ for id in ids:
+ if id != pid:
+ output.write(" <ref name='%s'/>\n" % (id))
+ pid = id
+ output.write(" </type>\n")
def serialize_xrefs_constructors(self, output):
funcs = {}
- for name in self.idx.functions.keys():
- id = self.idx.functions[name]
- try:
- (ret, params, desc) = id.info
- if ret[0] == "void":
- continue
- if funcs.has_key(ret[0]):
- funcs[ret[0]].append(name)
- else:
- funcs[ret[0]] = [name]
- except:
- pass
- typ = funcs.keys()
- typ.sort()
- for type in typ:
- if type == '' or type == 'void' or type == "int" or \
- type == "char *" or type == "const char *" :
- continue
- output.write(" <type name='%s'>\n" % (type))
- ids = funcs[type]
- ids.sort()
- for id in ids:
- output.write(" <ref name='%s'/>\n" % (id))
- output.write(" </type>\n")
+ for name in self.idx.functions.keys():
+ id = self.idx.functions[name]
+ try:
+ (ret, params, desc) = id.info
+ if ret[0] == "void":
+ continue
+ if funcs.has_key(ret[0]):
+ funcs[ret[0]].append(name)
+ else:
+ funcs[ret[0]] = [name]
+ except:
+ pass
+ typ = funcs.keys()
+ typ.sort()
+ for type in typ:
+ if type == '' or type == 'void' or type == "int" or \
+ type == "char *" or type == "const char *" :
+ continue
+ output.write(" <type name='%s'>\n" % (type))
+ ids = funcs[type]
+ ids.sort()
+ for id in ids:
+ output.write(" <ref name='%s'/>\n" % (id))
+ output.write(" </type>\n")
def serialize_xrefs_alpha(self, output):
- letter = None
- ids = self.idx.identifiers.keys()
- ids.sort()
- for id in ids:
- if id[0] != letter:
- if letter != None:
- output.write(" </letter>\n")
- letter = id[0]
- output.write(" <letter name='%s'>\n" % (letter))
- output.write(" <ref name='%s'/>\n" % (id))
- if letter != None:
- output.write(" </letter>\n")
+ letter = None
+ ids = self.idx.identifiers.keys()
+ ids.sort()
+ for id in ids:
+ if id[0] != letter:
+ if letter != None:
+ output.write(" </letter>\n")
+ letter = id[0]
+ output.write(" <letter name='%s'>\n" % (letter))
+ output.write(" <ref name='%s'/>\n" % (id))
+ if letter != None:
+ output.write(" </letter>\n")
def serialize_xrefs_references(self, output):
typ = self.idx.identifiers.keys()
- typ.sort()
- for id in typ:
- idf = self.idx.identifiers[id]
- module = idf.header
- output.write(" <reference name='%s' href='%s'/>\n" % (id,
- 'html/' + self.basename + '-' +
- self.modulename_file(module) + '.html#' +
- id))
+ typ.sort()
+ for id in typ:
+ idf = self.idx.identifiers[id]
+ module = idf.header
+ output.write(" <reference name='%s' href='%s'/>\n" % (id,
+ 'html/' + self.basename + '-' +
+ self.modulename_file(module) + '.html#' +
+ id))
def serialize_xrefs_index(self, output):
index = self.xref
- typ = index.keys()
- typ.sort()
- letter = None
- count = 0
- chunk = 0
- chunks = []
- for id in typ:
- if len(index[id]) > 30:
- continue
- if id[0] != letter:
- if letter == None or count > 200:
- if letter != None:
- output.write(" </letter>\n")
- output.write(" </chunk>\n")
- count = 0
- chunks.append(["chunk%s" % (chunk -1), first_letter, letter])
- output.write(" <chunk name='chunk%s'>\n" % (chunk))
- first_letter = id[0]
- chunk = chunk + 1
- elif letter != None:
- output.write(" </letter>\n")
- letter = id[0]
- output.write(" <letter name='%s'>\n" % (letter))
- output.write(" <word name='%s'>\n" % (id))
- tokens = index[id];
- tokens.sort()
- tok = None
- for token in tokens:
- if tok == token:
- continue
- tok = token
- output.write(" <ref name='%s'/>\n" % (token))
- count = count + 1
- output.write(" </word>\n")
- if letter != None:
- output.write(" </letter>\n")
- output.write(" </chunk>\n")
- if count != 0:
- chunks.append(["chunk%s" % (chunk -1), first_letter, letter])
- output.write(" <chunks>\n")
- for ch in chunks:
- output.write(" <chunk name='%s' start='%s' end='%s'/>\n" % (
- ch[0], ch[1], ch[2]))
- output.write(" </chunks>\n")
+ typ = index.keys()
+ typ.sort()
+ letter = None
+ count = 0
+ chunk = 0
+ chunks = []
+ for id in typ:
+ if len(index[id]) > 30:
+ continue
+ if id[0] != letter:
+ if letter == None or count > 200:
+ if letter != None:
+ output.write(" </letter>\n")
+ output.write(" </chunk>\n")
+ count = 0
+ chunks.append(["chunk%s" % (chunk -1), first_letter, letter])
+ output.write(" <chunk name='chunk%s'>\n" % (chunk))
+ first_letter = id[0]
+ chunk = chunk + 1
+ elif letter != None:
+ output.write(" </letter>\n")
+ letter = id[0]
+ output.write(" <letter name='%s'>\n" % (letter))
+ output.write(" <word name='%s'>\n" % (id))
+ tokens = index[id];
+ tokens.sort()
+ tok = None
+ for token in tokens:
+ if tok == token:
+ continue
+ tok = token
+ output.write(" <ref name='%s'/>\n" % (token))
+ count = count + 1
+ output.write(" </word>\n")
+ if letter != None:
+ output.write(" </letter>\n")
+ output.write(" </chunk>\n")
+ if count != 0:
+ chunks.append(["chunk%s" % (chunk -1), first_letter, letter])
+ output.write(" <chunks>\n")
+ for ch in chunks:
+ output.write(" <chunk name='%s' start='%s' end='%s'/>\n" % (
+ ch[0], ch[1], ch[2]))
+ output.write(" </chunks>\n")
def serialize_xrefs(self, output):
- output.write(" <references>\n")
- self.serialize_xrefs_references(output)
- output.write(" </references>\n")
- output.write(" <alpha>\n")
- self.serialize_xrefs_alpha(output)
- output.write(" </alpha>\n")
- output.write(" <constructors>\n")
- self.serialize_xrefs_constructors(output)
- output.write(" </constructors>\n")
- output.write(" <functions>\n")
- self.serialize_xrefs_functions(output)
- output.write(" </functions>\n")
- output.write(" <files>\n")
- self.serialize_xrefs_files(output)
- output.write(" </files>\n")
- output.write(" <index>\n")
- self.serialize_xrefs_index(output)
- output.write(" </index>\n")
+ output.write(" <references>\n")
+ self.serialize_xrefs_references(output)
+ output.write(" </references>\n")
+ output.write(" <alpha>\n")
+ self.serialize_xrefs_alpha(output)
+ output.write(" </alpha>\n")
+ output.write(" <constructors>\n")
+ self.serialize_xrefs_constructors(output)
+ output.write(" </constructors>\n")
+ output.write(" <functions>\n")
+ self.serialize_xrefs_functions(output)
+ output.write(" </functions>\n")
+ output.write(" <files>\n")
+ self.serialize_xrefs_files(output)
+ output.write(" </files>\n")
+ output.write(" <index>\n")
+ self.serialize_xrefs_index(output)
+ output.write(" </index>\n")
def serialize(self):
filename = "%s/%s-api.xml" % (self.path, self.name)
@@ -2124,10 +2124,10 @@ def rebuild():
print "Rebuilding API description for libvirt"
builder = docBuilder("libvirt", srcdir,
["src", "src/util", "include/libvirt"],
- [])
+ [])
else:
print "rebuild() failed, unable to guess the module"
- return None
+ return None
builder.scan()
builder.analyze()
builder.serialize()
@@ -2146,4 +2146,4 @@ if __name__ == "__main__":
debug = 1
parse(sys.argv[1])
else:
- rebuild()
+ rebuild()
--
1.7.4.1