'''Generate header file for nanopb from a ProtoBuf FileDescriptorSet.'''
-import google.protobuf.descriptor_pb2 as descriptor
-import nanopb_pb2
+# Import the Google protobuf runtime; print an installation hint and
+# re-raise if it is missing.  Catch only ImportError so that unrelated
+# failures (e.g. KeyboardInterrupt) are not masked by a misleading hint.
+try:
+ import google.protobuf.descriptor_pb2 as descriptor
+except ImportError:
+ print
+ print "*************************************************************"
+ print "*** Could not import the Google protobuf Python libraries ***"
+ print "*** Try installing package 'python-protobuf' or similar. ***"
+ print "*************************************************************"
+ print
+ raise
+
+# Import the precompiled nanopb options module; same pattern as above —
+# the hint tells the user how to regenerate it, then the error propagates.
+try:
+ import nanopb_pb2
+except ImportError:
+ print
+ print "***************************************************************"
+ print "*** Could not import the precompiled nanopb_pb2.py. ***"
+ print "*** Run 'make' in the 'generator' folder to update the file.***"
+ print "***************************************************************"
+ print
+ raise
+
import os.path
# Values are tuple (c type, pb ltype)
FieldD = descriptor.FieldDescriptorProto
datatypes = {
FieldD.TYPE_BOOL: ('bool', 'PB_LTYPE_VARINT'),
- FieldD.TYPE_DOUBLE: ('double', 'PB_LTYPE_FIXED'),
- FieldD.TYPE_FIXED32: ('uint32_t', 'PB_LTYPE_FIXED'),
- FieldD.TYPE_FIXED64: ('uint64_t', 'PB_LTYPE_FIXED'),
- FieldD.TYPE_FLOAT: ('float', 'PB_LTYPE_FIXED'),
+ FieldD.TYPE_DOUBLE: ('double', 'PB_LTYPE_FIXED64'),
+ FieldD.TYPE_FIXED32: ('uint32_t', 'PB_LTYPE_FIXED32'),
+ FieldD.TYPE_FIXED64: ('uint64_t', 'PB_LTYPE_FIXED64'),
+ FieldD.TYPE_FLOAT: ('float', 'PB_LTYPE_FIXED32'),
FieldD.TYPE_INT32: ('int32_t', 'PB_LTYPE_VARINT'),
FieldD.TYPE_INT64: ('int64_t', 'PB_LTYPE_VARINT'),
- FieldD.TYPE_SFIXED32: ('int32_t', 'PB_LTYPE_FIXED'),
- FieldD.TYPE_SFIXED64: ('int64_t', 'PB_LTYPE_FIXED'),
+ FieldD.TYPE_SFIXED32: ('int32_t', 'PB_LTYPE_FIXED32'),
+ FieldD.TYPE_SFIXED64: ('int64_t', 'PB_LTYPE_FIXED64'),
FieldD.TYPE_SINT32: ('int32_t', 'PB_LTYPE_SVARINT'),
FieldD.TYPE_SINT64: ('int64_t', 'PB_LTYPE_SVARINT'),
FieldD.TYPE_UINT32: ('uint32_t', 'PB_LTYPE_VARINT'),
elif desc.type == FieldD.TYPE_ENUM:
self.ltype = 'PB_LTYPE_VARINT'
self.ctype = names_from_type_name(desc.type_name)
- self.default = Names(self.ctype) + self.default
+ if self.default is not None:
+ self.default = self.ctype + self.default
elif desc.type == FieldD.TYPE_STRING:
self.ltype = 'PB_LTYPE_STRING'
if self.max_size is None:
result += '\n pb_membersize(%s, %s[0]),' % (self.struct_name, self.name)
result += ('\n pb_membersize(%s, %s) / pb_membersize(%s, %s[0]),'
% (self.struct_name, self.name, self.struct_name, self.name))
- elif self.ltype == 'PB_LTYPE_BYTES':
- result += '\n pb_membersize(%s, bytes),' % self.ctype
- result += ' 0,'
else:
result += '\n pb_membersize(%s, %s),' % (self.struct_name, self.name)
result += ' 0,'
self.fields = [Field(self.name, f) for f in desc.field]
self.ordered_fields = self.fields[:]
self.ordered_fields.sort()
-
- def __cmp__(self, other):
- '''Sort messages so that submessages are declared before the message
- that uses them.
- '''
- if self.refers_to(other.name):
- return 1
- elif other.refers_to(self.name):
- return -1
- else:
- return 0
-
- def refers_to(self, name):
- '''Returns True if this message uses the specified type as field type.'''
- for field in self.fields:
- if str(field.ctype) == str(name):
- return True
- return False
+
+ def get_dependencies(self):
+ '''Get list of type names that this structure refers to.
+
+ Returns the ctype of every field as a string; consumed by
+ sort_dependencies() to order struct definitions so that embedded
+ message types are declared before the messages that use them.
+ '''
+ return [str(field.ctype) for field in self.fields]
def __str__(self):
result = 'typedef struct {\n'
enums = []
messages = []
+ if fdesc.package:
+ base_name = Names(fdesc.package.split('.'))
+ else:
+ base_name = Names()
+
for enum in fdesc.enum_type:
- enums.append(Enum(Names(), enum))
+ enums.append(Enum(base_name, enum))
- for names, message in iterate_messages(fdesc):
+ for names, message in iterate_messages(fdesc, base_name):
messages.append(Message(names, message))
for enum in message.enum_type:
enums.append(Enum(names, enum))
return enums, messages
-def generate_header(headername, enums, messages):
+def toposort2(data):
+ '''Topological sort.
+ From http://code.activestate.com/recipes/577413-topological-sort/
+ This function is under the MIT license.
+
+ data: dict mapping an item to a set of the items it depends on.
+ Yields items so that each one appears only after all of its
+ dependencies. Raises AssertionError on a cyclic dependency.
+ NOTE: mutates (and eventually empties) the caller's dict.
+ '''
+ for k, v in data.items():
+ v.discard(k) # Ignore self dependencies
+ # Items mentioned only as dependencies get an empty dependency set,
+ # so they are emitted first.
+ extra_items_in_deps = reduce(set.union, data.values(), set()) - set(data.keys())
+ data.update(dict([(item, set()) for item in extra_items_in_deps]))
+ while True:
+ # Emit every item with no unmet dependencies (sorted for a
+ # deterministic order), then drop them from the remaining sets.
+ ordered = set(item for item,dep in data.items() if not dep)
+ if not ordered:
+ break
+ for item in sorted(ordered):
+ yield item
+ data = dict([(item, (dep - ordered)) for item,dep in data.items()
+ if item not in ordered])
+ # Anything left over could never have its dependencies satisfied.
+ assert not data, "A cyclic dependency exists amongst %r" % data
+
+def sort_dependencies(messages):
+ '''Sort a list of Messages based on dependencies.
+
+ Yields the messages reordered so that any message appears after the
+ message types its fields refer to, allowing the generated C structs
+ to be declared in a compilable order.
+ '''
+ dependencies = {}
+ message_by_name = {}
+ for message in messages:
+ dependencies[str(message.name)] = set(message.get_dependencies())
+ message_by_name[str(message.name)] = message
+
+ for msgname in toposort2(dependencies):
+ # toposort2 also yields dependency names that are not messages in
+ # this set (e.g. scalar field ctypes such as 'int32_t'); skip them.
+ if msgname in message_by_name:
+ yield message_by_name[msgname]
+
+def generate_header(dependencies, headername, enums, messages):
'''Generate content for a header file.
Generates strings, which should be concatenated and stored to file.
'''
yield '#define _PB_%s_\n' % symbol
yield '#include <pb.h>\n\n'
+ for dependency in dependencies:
+ noext = os.path.splitext(dependency)[0]
+ yield '#include "%s.pb.h"\n' % noext
+ yield '\n'
+
yield '/* Enum definitions */\n'
for enum in enums:
yield str(enum) + '\n\n'
yield '/* Struct definitions */\n'
- messages.sort()
- for msg in messages:
+ for msg in sort_dependencies(messages):
yield msg.types()
yield str(msg) + '\n\n'
print "Output fill be written to file.pb.h and file.pb.c"
sys.exit(1)
- data = open(sys.argv[1]).read()
+ data = open(sys.argv[1], 'rb').read()
fdesc = descriptor.FileDescriptorSet.FromString(data)
enums, messages = parse_file(fdesc.file[0])
print "Writing to " + headername + " and " + sourcename
+ # List of .proto files that should not be included in the C header file
+ # even if they are mentioned in the source .proto.
+ excludes = ['nanopb.proto', 'google/protobuf/descriptor.proto']
+ dependencies = [d for d in fdesc.file[0].dependency if d not in excludes]
+
header = open(headername, 'w')
- for part in generate_header(headerbasename, enums, messages):
+ for part in generate_header(dependencies, headerbasename, enums, messages):
header.write(part)
source = open(sourcename, 'w')
for part in generate_source(headerbasename, enums, messages):
source.write(part)
-
\ No newline at end of file
+