Skip to content
2 changes: 2 additions & 0 deletions +file/Attribute.m
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@
dtype; %type of value
dependent; %set externally. If the attribute is actually dependent on an untyped dataset/group
dependent_fullname; %set externally. This is the full name, including names of potential parent groups separated by underscore. A value will only be present if it would differ from dependent.
promoted_to_container = false; % set externally when promoted from a typed dataset onto the containing class API
scalar; %if the value is scalar or an array
dimnames;
shape;
Expand All @@ -25,6 +26,7 @@
obj.dtype = '';
obj.dependent = '';
obj.dependent_fullname = '';
obj.promoted_to_container = false;
obj.scalar = true;
obj.shape = {};
obj.dimnames = {};
Expand Down
32 changes: 31 additions & 1 deletion +file/Group.m
Original file line number Diff line number Diff line change
Expand Up @@ -161,7 +161,14 @@
PropertyMap = [PropertyMap; Sub_Attribute_Map];
end
PropertyMap(SubData.name) = SubData;
else
else % Typed dataset
includedAttributes = getIncludedTypedDatasetAttributes(obj, SubData);
if ~isempty(includedAttributes)
attrNames = {includedAttributes.name};
attrNames = strcat(SubData.name, '_', attrNames);
PropertyMap = [PropertyMap; ...
containers.Map(attrNames, num2cell(includedAttributes))];
end
if isempty(SubData.name)
PropertyMap(lower(SubData.type)) = SubData;
else
Expand Down Expand Up @@ -253,3 +260,26 @@
end
end
end

function includedAttributes = getIncludedTypedDatasetAttributes(GroupObj, datasetObj)
% getIncludedTypedDatasetAttributes - Return attributes declared on a named
% included typed dataset instance.
%
% This is used for reuse by inclusion (`neurodata_type_inc` without
% `neurodata_type_def`), where an existing typed dataset is embedded as a
% named component of another type. Promotion decisions are resolved later,
% once namespace context is available, so we can distinguish newly added
% attributes from modifications of attributes already defined on the
% included dataset type.
%
% Inputs:
%   GroupObj   - file.Group that contains the dataset (must itself be typed)
%   datasetObj - the embedded dataset; must be named and carry attributes
%
% Output:
%   includedAttributes - row vector of file.Attribute, each with its
%       `dependent` field set to the owning dataset's name; empty when the
%       group is untyped or the dataset is anonymous/attribute-free.

    includedAttributes = file.Attribute.empty;
    if isempty(GroupObj.type) || isempty(datasetObj.name) || isempty(datasetObj.attributes)
        return;
    end

    % Copy the attribute list once and mark each element's owner, instead of
    % growing the output array element-by-element inside a loop. Reshape to a
    % row so the result's orientation matches incremental (end+1) growth.
    includedAttributes = reshape(datasetObj.attributes, 1, []);
    for iAttr = 1:numel(includedAttributes)
        includedAttributes(iAttr).dependent = datasetObj.name;
    end
end
3 changes: 2 additions & 1 deletion +file/fillClass.m
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@
defaults = {};
dependent = {};
hidden = {}; % special hidden properties for hard-coded workarounds

%separate into readonly, required, and optional properties
for iGroup = 1:length(allProperties)
propertyName = allProperties{iGroup};
Expand Down Expand Up @@ -165,7 +166,7 @@
inherited);
setterFcns = file.fillSetters(setdiff(nonInherited, union(readonly, hiddenAndReadonly)), classprops);
validatorFcns = file.fillValidators(allProperties, classprops, namespace, namespace.getFullClassName(name), inherited);
exporterFcns = file.fillExport(nonInherited, class, superclassNames{1}, required);
exporterFcns = file.fillExport(nonInherited, class, superclassNames{1}, required, classprops);
methodBody = strjoin({constructorBody...
'%% SETTERS' setterFcns...
'%% VALIDATORS' validatorFcns...
Expand Down
19 changes: 18 additions & 1 deletion +file/fillExport.m
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
function festr = fillExport(propertyNames, RawClass, parentName, required)
function festr = fillExport(propertyNames, RawClass, parentName, required, classprops)
exportHeader = 'function refs = export(obj, fid, fullpath, refs)';
if isa(RawClass, 'file.Dataset')
propertyNames = propertyNames(~strcmp(propertyNames, 'data'));
Expand All @@ -22,6 +22,11 @@
propertyName = propertyNames{i};
pathProps = traverseRaw(propertyName, RawClass);
prop = pathProps{end};
if nargin >= 5 && isa(prop, 'file.Attribute') ...
&& isKey(classprops, propertyName) ...
&& isa(classprops(propertyName), 'file.Attribute')
prop = classprops(propertyName);
end
elideProps = pathProps(1:end-1);
elisions = cell(length(elideProps),1);
% Construct elisions
Expand Down Expand Up @@ -222,6 +227,7 @@

propertyChecks = {};
dependencyCheck = {};
preExportString = '';

if isa(prop, 'file.Attribute') && ~isempty(prop.dependent)
%if attribute is dependent, check before writing
Expand Down Expand Up @@ -254,6 +260,13 @@
warnIfMissingRequiredDependentAttributeStr = ...
sprintf('obj.throwErrorIfRequiredDependencyMissing(''%s'', ''%s'', fullpath)', name, depPropname);
end

if prop.promoted_to_container
preExportString = sprintf([ ...
'if isempty(obj.%1$s) && ~isempty(obj.%2$s) && isobject(obj.%2$s) && isprop(obj.%2$s, ''%3$s'') && ~isempty(obj.%2$s.%3$s)\n' ...
' obj.%1$s = obj.%2$s.%3$s;\n' ...
'end'], name, depPropname, prop.name);
end
end

if ~prop.required
Expand All @@ -273,6 +286,10 @@
end
end

if ~isempty(preExportString)
dataExportString = sprintf('%s\n%s', preExportString, dataExportString);
end

if ~isempty(dependencyCheck)
dataExportString = sprintf('%s\nif %s\n%s\nend', ...
dataExportString, ...
Expand Down
20 changes: 18 additions & 2 deletions +file/fillSetters.m
Original file line number Diff line number Diff line change
Expand Up @@ -43,13 +43,29 @@
warnIfDependencyMissingString = sprintf(...
'obj.warnIfAttributeDependencyMissing(''%s'', ''%s'')', ...
propname, parentname);

syncPromotedDatasetAttributeString = '';
if prop.promoted_to_container
syncPromotedDatasetAttributeString = sprintf([ ...
'if ~isempty(obj.%1$s) && isobject(obj.%1$s) && isprop(obj.%1$s, ''%2$s'')\n' ...
' if ~isempty(obj.%3$s)\n' ...
' obj.%1$s.%2$s = obj.%3$s;\n' ...
' elseif ~isempty(obj.%1$s.%2$s)\n' ...
' obj.%3$s = obj.%1$s.%2$s;\n' ...
' end\n' ...
'end'], parentname, prop.name, propname);
end

postsetFunctionStr = strjoin({...
postsetLines = {...
sprintf('function postset_%s(obj)', propname), ...
file.addSpaces(conditionStr, 4), ...
file.addSpaces(warnIfDependencyMissingString, 8), ...
file.addSpaces('end', 4), ...
'end'}, newline);
'end'};
if ~isempty(syncPromotedDatasetAttributeString)
postsetLines = [postsetLines(1:end-1), {file.addSpaces(syncPromotedDatasetAttributeString, 4)}, postsetLines(end)];
end
postsetFunctionStr = strjoin(postsetLines, newline);
end
end
end
78 changes: 77 additions & 1 deletion +file/processClass.m
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,7 @@
class = patchVectorData(class);
end
props = class.getProps();
props = markPromotedAttributesForIncludedTypedDatasets(class, props, namespace);

% Apply patches for special cases of schema/specification errors
class = applySchemaVersionPatches(nodename, class, props, namespace);
Expand All @@ -53,6 +54,81 @@
end
end

function props = markPromotedAttributesForIncludedTypedDatasets(classObj, props, namespace)
% Flag (or drop) the "<dataset>_<attribute>" properties generated for
% attributes of named, typed datasets embedded in a group.
%
% Attributes that the included dataset type already defines in its own
% schema are removed from the property map — the dataset class' own API
% exposes them. Attributes newly introduced at the inclusion site are kept
% and tagged as promoted to the containing class.

    if ~isa(classObj, 'file.Group') || isempty(classObj.datasets)
        return;
    end

    for datasetIndex = 1:numel(classObj.datasets)
        includedDataset = classObj.datasets(datasetIndex);

        isPromotable = ~isempty(includedDataset.type) ...
            && ~isempty(includedDataset.name) ...
            && ~isempty(includedDataset.attributes);
        if ~isPromotable
            continue;
        end

        typeNamespace = namespace.getNamespace(includedDataset.type);
        if isempty(typeNamespace)
            continue;
        end

        definedAttributeNames = getSchemaDefinedAttributeNames(...
            includedDataset.type, typeNamespace);

        for attributeIndex = 1:numel(includedDataset.attributes)
            thisAttribute = includedDataset.attributes(attributeIndex);
            promotedName = [includedDataset.name '_' thisAttribute.name];
            if ~isKey(props, promotedName)
                continue;
            end

            if any(strcmp(thisAttribute.name, definedAttributeNames))
                % Already part of the included type's own API; do not promote.
                remove(props, promotedName);
            else
                taggedAttribute = props(promotedName);
                taggedAttribute.promoted_to_container = true;
                props(promotedName) = taggedAttribute;
            end
        end
    end
end

function attributeNames = getSchemaDefinedAttributeNames(typeName, namespace)
% getSchemaDefinedAttributeNames - List the attribute property names a
% neurodata type defines in its own (resolved) schema.
%
% Results are memoized per (namespace name, namespace version, type name)
% because resolving the inheritance branch is comparatively expensive and
% the same types are queried repeatedly during generation.
%
% Inputs:
%   typeName  - char, name of the neurodata type to inspect
%   namespace - namespace object providing getClass/getRootBranch
%
% Output:
%   attributeNames - cell array of char; empty when the type cannot be
%       resolved or is neither a group nor a dataset type.

    persistent schemaAttributeNameCache

    if isempty(schemaAttributeNameCache)
        schemaAttributeNameCache = containers.Map('KeyType', 'char', 'ValueType', 'any');
    end

    cacheKey = strjoin({namespace.name, namespace.version, typeName}, '::');
    if isKey(schemaAttributeNameCache, cacheKey)
        attributeNames = schemaAttributeNameCache(cacheKey);
        return;
    end

    attributeNames = {};

    typeSpec = namespace.getClass(typeName);
    if ~isempty(typeSpec)
        branch = [{typeSpec} namespace.getRootBranch(typeName)];
        spec.internal.resolveInheritedFields(typeSpec, branch(2:end));
        spec.internal.expandFieldsInheritedByInclusion(typeSpec);

        switch typeSpec('class_type')
            case 'groups'
                classObj = file.Group(typeSpec);
            case 'datasets'
                classObj = file.Dataset(typeSpec);
            otherwise
                classObj = [];
        end

        if ~isempty(classObj)
            typeProps = classObj.getProps();
            propNames = keys(typeProps);
            isAttribute = cellfun(@(name) isa(typeProps(name), 'file.Attribute'), propNames);
            attributeNames = propNames(isAttribute);
        end
    end

    % Cache negative results too; previously the unresolved-type and
    % unsupported-class_type paths returned before caching, so those lookups
    % were re-resolved on every call.
    schemaAttributeNameCache(cacheKey) = attributeNames;
end

function class = patchVectorData(class)
%% Unit Attribute
% derived from schema 2.6.0
Expand Down Expand Up @@ -95,4 +171,4 @@
source('required') = false;

class.attributes(end+1) = file.Attribute(source);
end
end
29 changes: 23 additions & 6 deletions +tests/+system/UnitTimesIOTest.m
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
classdef UnitTimesIOTest < tests.system.PyNWBIOTest

Check failure on line 1 in +tests/+system/UnitTimesIOTest.m

View workflow job for this annotation

GitHub Actions / JUnit Test Report

UnitTimesIOTest.testInFromPyNWB

Verification failed in tests.system.UnitTimesIOTest/testInFromPyNWB. ----------------
Raw output
Verification failed in tests.system.UnitTimesIOTest/testInFromPyNWB.
    ----------------
    Test Diagnostic:
    ----------------
    Values for property 'waveforms_sampling_rate' are not equal
    ---------------------
    Framework Diagnostic:
    ---------------------
    verifyEqual failed.
    --> Sizes do not match.
        
        Actual size:
             0     0
        Expected size:
             1     1
    
    Actual Value:
         []
    Expected Value:
         1
    ------------------
    Stack Information:
    ------------------
    In /home/runner/work/matnwb/matnwb/+tests/+util/verifyContainerEqual.m (verifyContainerEqual) at 75
    In /home/runner/work/matnwb/matnwb/+tests/+system/PyNWBIOTest.m (PyNWBIOTest.testInFromPyNWB) at 35
methods
function addContainer(~, file)
file.units = types.core.Units( ...
Expand Down Expand Up @@ -40,17 +40,34 @@
, 'data', 1 ...
);

% set optional hidden vector data attributes
file.units.spike_times.resolution = 3;
% set optional Units table dataset attributes via promoted container API
file.units.spike_times_resolution = 3;
Units = file.units;
[Units.waveform_mean.sampling_rate ...
, Units.waveform_sd.sampling_rate ...
, Units.waveforms.sampling_rate ...
[Units.waveform_mean_sampling_rate ...
, Units.waveform_sd_sampling_rate ...
, Units.waveforms_sampling_rate ...
] = deal(1);
end

function c = getContainer(~, file)
c = file.units;
end
end
end

methods (Test)
function testLegacyNestedSpikeTimesResolutionIsPreserved(testCase)
    % Setting `resolution` directly on the nested VectorData (the
    % pre-promotion API) must remain visible both through the nested
    % object and through the promoted `spike_times_resolution` property.
    expectedResolution = 1/20000;

    spikeTimesColumn = types.hdmf_common.VectorData( ...
        'data', 11, ...
        'description', 'the spike times for each unit in seconds');
    spikeTimesColumn.resolution = expectedResolution;

    unitsTable = types.core.Units( ...
        'colnames', {'spike_times'}, ...
        'description', 'data on spiking units', ...
        'spike_times', spikeTimesColumn);

    testCase.verifyEqual(unitsTable.spike_times.resolution, expectedResolution);
    testCase.verifyEqual(unitsTable.spike_times_resolution, expectedResolution);
end
end
end
Loading
Loading