Case Study III: Algo Rhythms Quartet No. 1

Case Study III: Algo Rhythms Quartet No. 1

Algo Rhythms Quartet No. 1 is the first substantial config-driven score in the repository. Its role is clear in the project documentation: it is a proof of concept for a broader future composition system. The long-term direction is more general and eventually more post-tonal, but No. 1 is deliberately tonal. That choice simplifies the first experiment. It lets the project test rhythm, range handling, density control, engraving, and audio rendering before the harmonic language becomes more ambitious.

The score is defined through a TOML file and a generator package. The TOML file controls title, file naming, render settings, core generation constraints, pitch-class material, and instrumentation. The generator then creates quantized event streams for violin, viola, cello, and piano. The system enforces a small set of global rules: pitch choice must remain within a supplied pitch-class collection, melodic leaps are bounded, note and rest lengths are bounded, and the total sounding density is capped.

The quartet code relies on a small set of data classes. PartConfig describes one instrument entry from the TOML file. It carries the identifiers, instrument names, MIDI settings, and playable range. GenerationConfig holds the global generation rules such as measure count, durations, leap limits, and density limits. ProjectConfig is the top-level object passed through the package. It bundles title, output settings, pitch material, generation settings, render settings, and the list of parts.

The config loader is where these musical and technical settings become one internal project configuration:

Core quartet configuration data classes.


@dataclass(frozen=True)
class PartConfig:
    """One instrument entry loaded from the TOML parts list.

    Immutable container: carries identifiers, engraving names, MIDI playback
    settings, and the playable pitch range for a single part.
    """

    id: str  # stable part identifier, e.g. "violin", "viola", "cello"
    name: str  # full instrument name for the score
    short_name: str  # abbreviated staff name
    family: str  # instrument family, e.g. "keyboard" (see _default_midi_instrument)
    clef: str  # clef name for the staff (loader default: "treble")
    midi_channel: int  # MIDI channel for playback (loader default: 1)
    midi_program: int  # MIDI program/patch number (loader default: 1)
    midi_instrument: str  # MIDI instrument name, e.g. "acoustic grand"
    range_low: int  # lowest playable pitch, as a MIDI note number
    range_high: int  # highest playable pitch, as a MIDI note number
    staff_type: str = "single"  # "single" staff or "grand" (two-staff) layout
    role: str = "melodic"  # generation role hint; "bass" alters leap/duration rules


@dataclass(frozen=True)
class GenerationConfig:
    """Global generation rules shared by every voice.

    Durations are expressed in quanta: integer multiples of the smallest
    configured note duration (the quantization base unit).
    """

    measures: int  # number of measures to generate
    time_signature: tuple[int, int]  # (numerator, denominator), e.g. (4, 4)
    min_note_quanta: int  # shortest allowed note length, in quanta
    max_note_quanta: int  # longest allowed note length, in quanta
    min_rest_quanta: int  # shortest allowed rest length, in quanta
    max_rest_quanta: int  # longest allowed rest length, in quanta
    max_simultaneous_tones_per_quantum: int  # density cap across all sounding voices
    max_pitch_leap: int  # largest melodic leap (presumably semitones — confirm against _candidate_pitches)
    seed: int  # RNG seed for reproducible generation
    tempo_bpm: int  # playback tempo in beats per minute
    measure_quanta: int  # number of quanta in one full measure


@dataclass(frozen=True)
class RenderConfig:
    """Audio rendering settings; soundfont paths are None when unset."""

    soundfont: str | None  # default soundfont path for all parts
    piano_soundfont: str | None  # optional override (presumably for keyboard parts — confirm)
    strings_soundfont: str | None  # optional override (presumably for string parts — confirm)
    sample_rate: int  # audio sample rate in Hz


@dataclass(frozen=True)
class OutputConfig:
    """Output file-naming options.

    NOTE(review): the include_* flags presumably toggle components of the
    generated file name — confirm against the filename-building code.
    """

    basename: str  # base output file name, without extension
    label: str | None  # optional label; may be None (loader passes no default)
    include_measures: bool  # include the measure count in the name
    include_tempo: bool  # include the tempo in the name
    include_seed: bool  # include the RNG seed in the name
    include_timestamp: bool  # include a timestamp in the name
    timestamp_format: str  # strftime-style format string for the timestamp

Loading the No. 1 configuration from TOML.


def _default_midi_instrument(part_id: str, family: str, staff_type: str) -> str:
    if staff_type == "grand":
        return "acoustic grand"
    if part_id in {"violin", "viola", "cello"}:
        return part_id
    if family == "keyboard":
        return "acoustic grand"
    return "acoustic grand"


def load_config(path: str | Path) -> ProjectConfig:
    config_path = Path(path)
    with config_path.open("rb") as file_pointer:
        data = tomllib.load(file_pointer)

    generation_data = data["generation"]
    time_signature = tuple(generation_data.get("time_signature", [4, 4]))
    base_unit = Fraction(generation_data.get("min_note_duration", "1/16"))
    measure_length = Fraction(time_signature[0], time_signature[1])
    measure_quanta = _parse_duration_to_quanta(str(measure_length), base_unit)

    parts = []
    for entry in data["parts"]:
        pitch_range = entry["range"]
        parts.append(
            PartConfig(
                id=entry["id"],
                name=entry["name"],
                short_name=entry["short_name"],
                family=entry["family"],
                clef=entry.get("clef", "treble"),
                midi_channel=entry.get("midi_channel", 1),
                midi_program=entry.get("midi_program", 1),
                midi_instrument=entry.get(
                    "midi_instrument",
                    _default_midi_instrument(
                        entry["id"],
                        entry["family"],
                        entry.get("staff_type", "single"),
                    ),
                ),
                range_low=_parse_midi_pitch(pitch_range[0]),
                range_high=_parse_midi_pitch(pitch_range[1]),
                staff_type=entry.get("staff_type", "single"),
                role=entry.get("role", "melodic"),
            )
        )

    generation = GenerationConfig(
        measures=generation_data.get("measures", 4),
        time_signature=time_signature,
        min_note_quanta=_parse_duration_to_quanta(
            generation_data.get("min_note_duration", "1/16"),
            base_unit,
        ),
        max_note_quanta=_parse_duration_to_quanta(
            generation_data.get("max_note_duration", str(measure_length)),
            base_unit,
        ),
        min_rest_quanta=_parse_duration_to_quanta(
            generation_data.get("min_rest_duration", "1/16"),
            base_unit,
        ),
        max_rest_quanta=_parse_duration_to_quanta(
            generation_data.get("max_rest_duration", "1/4"),
            base_unit,
        ),
        max_simultaneous_tones_per_quantum=generation_data.get(
            "max_simultaneous_tones_per_quantum", 4
        ),
        max_pitch_leap=generation_data.get("max_pitch_leap", 5),
        seed=generation_data.get("seed", 17),
        tempo_bpm=generation_data.get("tempo_bpm", 72),
        measure_quanta=measure_quanta,
    )

    materials = data.get("materials", {})
    pitch_classes = tuple(materials.get("pitch_classes", [0, 1, 3, 6, 8, 10]))
    render_data = data.get("render", {})

    output_data = data.get("output", {})

    return ProjectConfig(
        title=data.get("title", "Untitled Piano Quartet"),
        composer=data.get("composer", "Unknown Composer"),
        output=OutputConfig(
            basename=output_data.get("basename", "algorithmic-piano-quartet"),
            label=output_data.get("label"),
            include_measures=output_data.get("include_measures", True),
            include_tempo=output_data.get("include_tempo", True),

The musical content is not chosen all at once. It is built event by event on a quantized timeline. The generator uses a few core classes here as well. Event represents a single timed unit with a start position, duration, and pitch. VoiceMaterial is the list of events for one staff. Piece is the generated score-level result, including the metadata needed later by the score layer. These classes are simple containers, but they matter because they define the internal shape of the generated material.

The result is not strict counterpoint or formal harmony. It is a constrained event generator designed to produce clear, playable material.

Download

Listen

Score Preview

First page preview of Algo Rhythms Quartet No. 1

That event loop sits near the center of the package:

Core event generation for Quartet No. 1.
        )
    elif config.render.soundfont:
        lines.append(f"Render: {_soundfont_name(config.render.soundfont)}")

    return tuple(lines)


def _generate_voice(
    staff_id: str,
    part_id: str,
    name: str,
    short_name: str,
    clef: str,
    midi_instrument: str,
    low: int,
    high: int,
    role: str,
    config: ProjectConfig,
    tracker: OccupancyTracker,
    rng: random.Random,
) -> VoiceMaterial:
    total_quanta = config.generation.measures * config.generation.measure_quanta
    note_min = config.generation.min_note_quanta
    note_max = config.generation.max_note_quanta
    rest_min = config.generation.min_rest_quanta
    rest_max = config.generation.max_rest_quanta
    max_pitch_leap = config.generation.max_pitch_leap + (2 if role == "bass" else 0)
    anchor = (low + high) // 2
    last_pitch: int | None = None
    cursor = 0
    events: list[Event] = []

    while cursor < total_quanta:
        measure_offset = cursor % config.generation.measure_quanta
        measure_remaining = config.generation.measure_quanta - measure_offset
        current_density = tracker.count(cursor)
        choose_rest = current_density >= config.generation.max_simultaneous_tones_per_quantum
        if not choose_rest:
            rest_probability = _rest_probability(
                role,
                current_density,
                config.generation.max_simultaneous_tones_per_quantum,
            )
            rest_probability += _ending_rest_boost(
                cursor=cursor,
                total_quanta=total_quanta,
                measure_quanta=config.generation.measure_quanta,
            )
            choose_rest = rng.random() < min(rest_probability, 0.95)

        if choose_rest:
            durations = _duration_candidates(role, rest_min, rest_max, measure_remaining)
            duration = rng.choice(durations)
            events.append(Event(start_quantum=cursor, duration_quanta=duration, pitch=None))
            cursor += duration
            continue

        durations = _duration_candidates(role, note_min, note_max, measure_remaining)
        if role == "bass":
            durations = [value for value in durations if value >= 2] or durations
        duration = rng.choice(durations)
        if not tracker.can_place(cursor, duration):
            rest_duration = min(measure_remaining, max(rest_min, 1))
            events.append(Event(start_quantum=cursor, duration_quanta=rest_duration, pitch=None))
            cursor += rest_duration
            continue

        candidates = _candidate_pitches(
            low=low,
            high=high,
            pitch_classes=config.pitch_classes,
            anchor=anchor,
            last_pitch=last_pitch,
            max_pitch_leap=max_pitch_leap,
        )
        window = min(6, len(candidates))
        pitch = rng.choice(candidates[:window]) if candidates else anchor
        tracker.occupy(cursor, duration)
        events.append(Event(start_quantum=cursor, duration_quanta=duration, pitch=pitch))
        last_pitch = pitch
        cursor += duration

    return VoiceMaterial(
        staff_id=staff_id,
        part_id=part_id,

Once events exist, the score layer turns them into Abjad objects, attaches dynamics and notation details, applies ottava marks, and assembles the final ensemble score. The system also adds a short end note that records the main compositional parameters for the generated run.

No. 1 also matters as a baseline for the second quartet. It shows the original generator design before the later work on chordal piano writing, separate piano occupancy, and hand-specific spacing.