# File: kll/__init__.py
#!/usr/bin/env python3
'''
KLL Compiler
KLL - Keyboard Layout Language
'''
# Copyright (C) 2018 by Jacob Alexander
#
# This file is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This file is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this file. If not, see <http://www.gnu.org/licenses/>.
## Imports
import argparse
import os
import sys
import kll.common.stage as stage
## Variables
__version__ = '0.5.6.3'
kll_name = 'kll'
### Decorators ###
# Print Decorator Variables
ERROR = '\033[5;1;31mERROR\033[0m:'
WARNING = '\033[5;1;33mWARNING\033[0m:'
# Python Text Formatting Fixer...
# Because the creators of Python are averse to proper capitalization.
textFormatter_lookup = {
"usage: ": "\033[1mUsage\033[0m: ",
"optional arguments": "\033[1mOptional Arguments\033[0m",
}
def textFormatter_gettext(s):
return textFormatter_lookup.get(s, s)
argparse._ = textFormatter_gettext
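# Example (illustrative): with this override in place, argparse help output shows the
# bolded "Usage:" and "Optional Arguments" headings instead of "usage:" and
# "optional arguments". Note that Python 3.10+ renamed the latter heading to "options",
# which this lookup table does not cover.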
### Misc Utility Functions ###
def git_revision(kll_path):
'''
Retrieve git information using given path
@param kll_path: Path to git directory
@return: (revision, changed, revision_date, long_version)
'''
import git
# Default values if git is not available
revision = ""
changed = []
date = ""
long_version = ""
# Just in case git can't be found
try:
# Initialize repo
repo = git.Repo(kll_path)
# Get hash of the latest git commit
revision = repo.head.object.hexsha
# Get list of files that have changed since the commit
changed = [item.a_path for item in repo.index.diff(None)] + [item.a_path for item in repo.index.diff('HEAD')]
# Get commit date
date = repo.head.commit.committed_datetime
long_version = ".{0} - {1}".format(revision, date)
except git.exc.InvalidGitRepositoryError:
pass
return revision, changed, date, long_version
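# Example (illustrative usage): querying the compiler's own checkout
#   rev, changed, date, long_version = git_revision(os.path.dirname(__file__))
# 'rev' is the HEAD commit sha, 'changed' lists modified file paths, and 'long_version'
# (".<sha> - <datetime>") is appended to __version__ below; all values are empty strings
# (or an empty list) when the path is not inside a git repository.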
### Argument Parsing ###
def checkFileExists(filename):
'''
Validate that file exists
@param filename: Path to file
'''
if not os.path.isfile(filename):
print("{0} {1} does not exist...".format(ERROR, filename))
sys.exit(1)
def command_line_args(control, input_args):
'''
Initialize argparse and process all command line arguments
@param control: ControlStage object which has access to all the group argument parsers
'''
# Setup argument processor
parser = argparse.ArgumentParser(
usage="{} [options..] [<file>..]".format(kll_name),
description="KLL Compiler - Generates specified output from KLL .kll files.",
epilog="Example: {0} scan_map.kll".format(kll_name),
formatter_class=argparse.RawTextHelpFormatter,
add_help=False,
)
# Install path
install_path = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
# Get git information
control.git_rev, control.git_changes, control.git_date, long_version = git_revision(
os.path.join(install_path, '..')
)
control.version = "{0}{1}".format(__version__, long_version)
# Optional Arguments
parser.add_argument(
'-h', '--help',
action="help",
help="This message."
)
parser.add_argument(
'-v', '--version',
action="version",
version="{0} {1}".format(kll_name, control.version),
help="Show program's version number and exit"
)
parser.add_argument(
'--path',
action="store_true",
help="Shows the absolute path to the kll compiler installation directory. Then exits.",
)
parser.add_argument(
'--layout-cache-path',
action="store_true",
help="Shows the absolute path to the kll layouts cache directory. Then exits.",
)
parser.add_argument(
'--layout-cache-refresh',
action="store_true",
help="Does a refresh on the kll layouts cache. Then exits. Don't do this too often or you'll get GitHub RateLimit Errors.",
)
# Add stage arguments
control.command_line_flags(parser)
# Process Arguments
args = parser.parse_args(input_args)
# If --path defined, lookup installation path, then exit
if args.path:
print(install_path)
sys.exit(0)
# If --layout-cache-path defined, lookup cache directory for layouts cache, then exit
if args.layout_cache_path:
import layouts
mgr = layouts.Layouts()
layout_path = mgr.layout_path
print(layout_path)
sys.exit(0)
# If --layout-cache-refresh defined, show the refreshed layout path
if args.layout_cache_refresh:
import layouts
mgr = layouts.Layouts(force_refresh=True)
layout_path = mgr.layout_path
print(layout_path)
sys.exit(0)
# Utilize parsed arguments in each of the stages
control.command_line_args(args)
### Main Entry Point ###
def main(args):
# Initialize Control Stages
control = stage.ControlStage()
# Process Command-Line Args
command_line_args(control, args)
# Process Control Stages
control.process()
# Successful completion
sys.exit(0)
if __name__ == '__main__':
main(sys.argv[1:])
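# Example (illustrative): the compiler can be driven from Python as well as the CLI
#   import kll
#   kll.main(['scan_map.kll'])  # equivalent to `kll scan_map.kll`; note main() calls sys.exit()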
# File: kll/__main__.py
#!/usr/bin/env python3
'''
KLL Compiler
Keyboard Layout Language
'''
# Copyright (C) 2014-2018 by Jacob Alexander
#
# This file is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This file is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this file. If not, see <http://www.gnu.org/licenses/>.
### Paths ###
import os
import sys
path = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')
sys.path.insert(0, path)
### Imports ###
import kll
### Main Entry Point ###
if __name__ == '__main__':
# See __init__.py
kll.main(sys.argv[1:])
# File: kll/kll
#!/usr/bin/env python3
'''
KLL Compiler
Keyboard Layout Language
'''
# Copyright (C) 2014-2018 by Jacob Alexander
#
# This file is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This file is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this file. If not, see <http://www.gnu.org/licenses/>.
### Paths ###
import os
import sys
path = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')
sys.path.insert(0, path)
### Imports ###
import kll
### Main Entry Point ###
if __name__ == '__main__':
# See __init__.py
kll.main(sys.argv[1:])
# File: kll/common/README.md

# KLL Compiler - common
This is where the bulk of the KLL compiler processing occurs, including all of the datastructures used to contain the parsed expressions.
## Files
Brief description of each of the files.
### Process
Files that deal with file and expression processing, including parsing and tokenization.
* [emitter.py](emitter.py) - Base classes for [KLL emitters](../emitters).
* [parse.py](parse.py) - Contains most of the KLL xBNF parsing rules and how they map to the datastructures.
* [stage.py](stage.py) - Handles each stage of KLL file processing, from file reading to emitter output. This is where to start if you're unsure.
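
For orientation, a minimal sketch (mirroring the flow in `kll/__init__.py`; `args` here stands for an already-parsed argparse namespace) of how the stages are driven:

```python
import kll.common.stage as stage

control = stage.ControlStage()   # builds the stage/emitter pipeline
control.command_line_args(args)  # hand the parsed arguments to each stage
control.process()                # run every stage, from file reading to emitter output
```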
### Datastructure
Datastructure assembly classes used to contain KLL data.
* [channel.py](channel.py) - Container classes for KLL pixel channels.
* [context.py](context.py) - Container classes for KLL contexts (e.g. Generic, Configuration, BaseMap, DefaultMap, PartialMap and Merge).
* [expression.py](expression.py) - Container classes for KLL expressions (e.g. MapExpression, etc.).
* [file.py](file.py) - Container class for reading kll files.
* [id.py](id.py) - Container classes for KLL id elements (e.g. HIDId, PixelId, ScanCodeId, NoneId, etc.).
* [modifier.py](modifier.py) - Container classes for KLL modifiers (e.g. AnimationModifier, PixelModifier, etc.).
* [organization.py](organization.py) - Container classes for expression organizations. Handles expression merging.
* [position.py](position.py) - Container class for physical KLL positions.
* [schedule.py](schedule.py) - Container class for KLL schedules and schedule parameters.
### Constants
Lookup tables and other static data.
* [hid_dict.py](hid_dict.py) - Dictionary lookup for USB HID codes (both symbolic and numeric).
# File: kll/common/__init__.py
# (empty file)

# File: kll/common/channel.py
#!/usr/bin/env python3
'''
KLL Channel Containers
'''
# Copyright (C) 2016-2017 by Jacob Alexander
#
# This file is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This file is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this file. If not, see <http://www.gnu.org/licenses/>.
### Imports ###
### Decorators ###
# Print Decorator Variables
ERROR = '\033[5;1;31mERROR\033[0m:'
WARNING = '\033[5;1;33mWARNING\033[0m:'
### Classes ###
class Channel:
'''
Pixel Channel Container
'''
def __init__(self, uid, width):
self.uid = uid
self.width = width
def __repr__(self):
return "{0}:{1}".format(self.uid, self.width)
class ChannelList:
'''
Pixel Channel List Container
'''
def __init__(self):
self.channels = []
def setChannels(self, channel_list):
'''
Apply channels to Pixel
'''
for channel in channel_list:
self.channels.append(Channel(channel[0], channel[1]))
def strChannels(self):
'''
__repr__ of Channel when multiple inheritance is used
'''
output = ""
for index, channel in enumerate(self.channels):
if index > 0:
output += ","
output += "{0}".format(channel)
return output
def __repr__(self):
return self.strChannels()
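# Example (illustrative): a pixel driven by three 8-bit channels
#   channel_list = ChannelList()
#   channel_list.setChannels([(12, 8), (13, 8), (14, 8)])
#   repr(channel_list)  # -> "12:8,13:8,14:8"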
# File: kll/common/context.py
#!/usr/bin/env python3
'''
KLL Context Definitions
* Generic (auto-detected)
* Configuration
* BaseMap
* DefaultMap
* PartialMap
'''
# Copyright (C) 2016-2018 by Jacob Alexander
#
# This file is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This file is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this file. If not, see <http://www.gnu.org/licenses/>.
### Imports ###
import copy
import os
import kll.common.organization as organization
### Decorators ###
# Print Decorator Variables
ERROR = '\033[5;1;31mERROR\033[0m:'
WARNING = '\033[5;1;33mWARNING\033[0m:'
### Classes ###
class Context:
'''
Base KLL Context Class
'''
def __init__(self):
'''
Context initialization
'''
# Each context may have one or more included kll files
# And each of these files will have at least 1 Context
self.kll_files = []
# File data assigned to each context
# This info is populated during the PreprocessorStage
self.lines = []
self.data = ""
self.parent = None
# Tokenized data sets
self.classification_token_data = []
self.expressions = []
# Organized Expression Datastructure
self.organization = organization.Organization(self)
# Layer Information (unset, unless a PartialMapContext)
self.layer = None
# Connect Id information (unset, but usually initialized)
self.connect_id = None
# HID Mapping object
self.hid_mapping = None
def __repr__(self):
# Build list of all the info
return "(kll_files={}, hid_mapping={}, lines={}, data='''{}''')".format(
self.kll_files,
self.hid_mapping,
self.lines,
self.data,
)
def layer_info(self):
'''
Returns a text string indicating which layer this is
'''
if self.layer is None:
return "0"
return "{}".format(self.layer + 1)
def initial_context(self, lines, data, parent):
'''
Used in the PreprocessorStage to update the initial line and kll file data
@param lines: Data split per line
@param data: Entire context in a single string
@param parent: Parent node, always a KLLFile
'''
self.lines = lines
self.data = data
self.parent = parent
self.connect_id = parent.connect_id
def query(self, kll_expression, kll_type=None):
'''
Query
Returns a dictionary of the specified property.
Most queries should use this.
See organization.py:Organization for property_type details.
@param kll_expression: String name of expression type
@param kll_type: String name of the expression sub-type
If set to None, return all
@return: context_name: (dictionary)
'''
if kll_type is None:
return self.organization.data_mapping[kll_expression]
return self.organization.data_mapping[kll_expression][kll_type]
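# Example (illustrative; type names assumed to match organization.py's data_mapping):
#   positions = kll_context.query('DataAssociationExpression', 'ScanCodePosition')
# returns the dictionary of merged scan code position expressions, while passing
# kll_type=None returns every sub-type dictionary for that expression type.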
class GenericContext(Context):
'''
Generic KLL Context Class
'''
class ConfigurationContext(Context):
'''
Configuration KLL Context Class
'''
class BaseMapContext(Context):
'''
Base Map KLL Context Class
'''
class DefaultMapContext(Context):
'''
Default Map KLL Context Class
'''
class PartialMapContext(Context):
'''
Partial Map KLL Context Class
'''
def __init__(self, layer):
'''
Partial Map Layer Context Initialization
@param: Layer associated with Partial Map
'''
super().__init__()
self.layer = layer
class MergeContext(Context):
'''
Container class for a merged Context
Has references to the original contexts merged in
'''
def __init__(self, base_context):
'''
Initialize the MergeContext with the base context
Another MergeContext can be used as the base_context
@param base_context: Context used to seed the MergeContext
'''
super().__init__()
# Setup list of kll_files
self.kll_files = base_context.kll_files
# Transfer layer, whenever merging in, we'll use the new layer identifier
self.layer = base_context.layer
# List of contexts, in the order of merging, starting from the base
self.contexts = [base_context]
# Copy the base context Organization into the MergeContext
self.organization = copy.copy(base_context.organization)
self.organization.parent = self
# Set the layer if the base is a PartialMapContext
if base_context.__class__.__name__ == 'PartialMapContext':
self.layer = base_context.layer
def merge(self, merge_in, map_type, debug):
'''
Merge in context
Another MergeContext can be merged into a MergeContext
@param merge_in: Context to merge in to this one
@param map_type: Used for map specific merges (e.g. BaseMap reductions)
@param debug: Enable debug out
'''
# Extend list of kll_files
self.kll_files.extend(merge_in.kll_files)
# Use merge_in layer identifier as the master (most likely to be correct)
self.layer = merge_in.layer
# Append to context list
self.contexts.append(merge_in)
# Merge context
self.organization.merge(
merge_in.organization,
map_type,
debug
)
# Set the layer if the merged-in context is a PartialMapContext
if merge_in.__class__.__name__ == 'PartialMapContext':
self.layer = merge_in.layer
def cleanup(self, debug=False):
'''
Post-processing step for merges that may need to remove some data in the organization.
Mainly used for dropping BaseMapContext expressions after generating a PartialMapContext.
'''
self.organization.cleanup(debug)
def reduction(self, debug=False):
'''
Simplifies datastructure
NOTE: This will remove data, therefore, context is lost
'''
self.organization.reduction(debug)
def paths(self):
'''
Returns list of file paths used to generate this context
'''
file_paths = []
for kll_context in self.contexts:
# If context is a MergeContext then we have to recursively search
if kll_context.__class__.__name__ == 'MergeContext':
file_paths.extend(kll_context.paths())
else:
file_paths.append(kll_context.parent.path)
return file_paths
def files(self):
'''
Short form list of file paths used to generate this context
'''
file_paths = []
for file_path in self.paths():
file_paths.append(os.path.basename(file_path))
return file_paths
def __repr__(self):
return "(kll_files={0}, organization={1})".format(
self.files(),
self.organization,
)
def query_contexts(self, kll_expression, kll_type):
'''
Context Query
Returns a list of tuples (dictionary, kll_context) doing a deep search to the context leaf nodes.
This results in pre-merge data and is useful for querying information about files used during compilation.
See organization.py:Organization for property_type details.
@param kll_expression: String name of expression type
@param kll_type: String name of the expression sub-type
@return: context_name: (dictionary, kll_context)
'''
# Build list of leaf contexts
leaf_contexts = []
for kll_context in self.contexts:
# Recursively search if necessary
if kll_context.__class__.__name__ == 'MergeContext':
leaf_contexts.extend(
kll_context.query_contexts(
kll_expression, kll_type))
else:
leaf_contexts.append((
kll_context.query(
kll_expression,
kll_type
),
kll_context
))
return leaf_contexts
# File: kll/common/emitter.py
#!/usr/bin/env python3
'''
KLL Emitter Base Classes
'''
# Copyright (C) 2016-2017 by Jacob Alexander
#
# This file is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This file is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this file. If not, see <http://www.gnu.org/licenses/>.
### Imports ###
import json
import os
import re
import sys
### Decorators ###
# Print Decorator Variables
ERROR = '\033[5;1;31mERROR\033[0m:'
WARNING = '\033[5;1;33mWARNING\033[0m:'
### Classes ###
class Emitter:
'''
KLL Emitter Base Class
NOTE: Emitter should do as little as possible in the __init__ function.
'''
def __init__(self, control):
'''
Emitter initialization
@param control: ControlStage object, used to access data from other stages
'''
self.control = control
self.color = False
# Signal erroring due to an issue
# We may not want to exit immediately as we could find other potential
# issues that need fixing
self.error_exit = False
def command_line_args(self, args):
'''
Group parser for command line arguments
@param args: Name space of processed arguments
'''
print("{0} '{1}' '{2}' has not been implemented yet"
.format(
WARNING,
self.command_line_args.__name__,
type(self).__name__
)
)
def command_line_flags(self, parser):
'''
Group parser for command line options
@param parser: argparse setup object
'''
print("{0} '{1}' '{2}' has not been implemented yet"
.format(
WARNING,
self.command_line_flags.__name__,
type(self).__name__
)
)
def process(self):
'''
Emitter Processing
'''
print("{0} '{1}' '{2}' has not been implemented yet"
.format(
WARNING,
self.process.__name__,
type(self).__name__
)
)
def output(self):
'''
Final Stage of Emitter
Generate desired outputs
'''
print("{0} '{1}' '{2}' has not been implemented yet"
.format(
WARNING,
self.output.__name__,
type(self).__name__
)
)
def check(self):
'''
Determines whether or not we've successfully emitted.
'''
return not self.error_exit
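# Example (illustrative sketch): a concrete emitter combines Emitter with one of the
# helper classes below and fills in the lifecycle methods, e.g.
#   class MyEmitter(Emitter, TextEmitter):
#       def __init__(self, control):
#           Emitter.__init__(self, control)
#           TextEmitter.__init__(self)
#       def process(self):
#           self.fill_dict['Information'] = '// generated by kll'  # hypothetical tag
#       def output(self):
#           self.load_template('_defs.h')  # hypothetical template path
#           self.generate('defs.h')
# The compiler drives command_line_flags()/command_line_args() during argument handling,
# then process() and output(), and finally check() to detect emitter failures.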
class FileEmitter:
'''
KLL File Emitter Class
Base class for any emitter that wants to output a file.
Generally, it is recommended to use the TextEmitter as templates are more readable.
'''
def __init__(self):
'''
FileEmitter Initialization
'''
self.output_files = []
def generate(self, output_path):
'''
Generate each output file registered in self.output_files
@param output_path: Path to the output directory
'''
for name, contents in self.output_files:
with open("{0}/{1}".format(output_path, name), 'w') as outputFile:
outputFile.write(contents)
class TextEmitter:
'''
KLL Text Emitter Class
Base class for any text emitter that wants to use the templating functionality
If multiple files need to be generated, call load_template and generate multiple times.
e.g.
load_template('_myfile.h')
generate('/tmp/myfile.h')
load_template('_myfile2.h')
generate('/tmp/myfile2.h')
TODO
- Generate list of unused tags
'''
def __init__(self):
'''
TextEmitter Initialization
'''
# Dictionary used to do template replacements
self.fill_dict = {}
self.tag_list = []
self.template = None
def load_template(self, template):
'''
Loads template file
Looks for <|tags|> to replace in the template
@param template: Path to template
'''
# Does template exist?
if not os.path.isfile(template):
print("{0} '{1}' does not exist...".format(ERROR, template))
sys.exit(1)
self.template = template
# Generate list of fill tags
with open(template, 'r') as openFile:
for line in openFile:
match = re.findall(r'<\|([^|>]+)\|>', line)
for item in match:
self.tag_list.append(item)
def generate(self, output_path):
'''
Generates the output file from the template file
@param output_path: Path to the generated file
'''
# Make sure we've called load_template at least once
if self.template is None:
print(
"{0} TextEmitter template (load_template) has not been called.".format(ERROR))
sys.exit(1)
# Process each line of the template, outputting to the target path
with open(output_path, 'w') as outputFile:
with open(self.template, 'r') as templateFile:
for line in templateFile:
# TODO Support multiple replacements per line
# TODO Support replacement with other text inline
match = re.findall(r'<\|([^|>]+)\|>', line)
# If match, replace with processed variable
if match:
try:
outputFile.write(self.fill_dict[match[0]])
except KeyError as err:
print("{0} '{1}' not found, skipping...".format(
WARNING, match[0]
))
outputFile.write("\n")
# Otherwise, just append template to output file
else:
outputFile.write(line)
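# Example (illustrative; 'VersionInfo' is a hypothetical tag name): given a template line
# containing "<|VersionInfo|>" and
#   self.fill_dict['VersionInfo'] = '#define KLL_VERSION "0.5.6"'
# generate() writes that dictionary value in place of the line; tags missing from
# fill_dict produce a warning and a blank line instead.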
class JsonEmitter:
'''
KLL JSON Emitter Class
Base class for any emitter that wants to output a JSON file from self.json_dict
'''
def __init__(self):
'''
JsonEmitter Initialization
'''
self.json_dict = {}
def generate_json(self, output_path):
'''
Generates the output json file using self.json_dict
'''
output = json.dumps(self.json_dict, indent=4, sort_keys=True)
# Write json file
with open(output_path, 'w') as outputFile:
outputFile.write(output)
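# Example (illustrative): subclasses populate self.json_dict during processing, e.g.
#   self.json_dict['version'] = '0.5.6'
# and then call generate_json('kll.json') to write indented, key-sorted JSON.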
# File: kll/common/expression.py
#!/usr/bin/env python3
'''
KLL Expression Container
'''
# Copyright (C) 2016-2018 by Jacob Alexander
#
# This file is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This file is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this file. If not, see <http://www.gnu.org/licenses/>.
### Imports ###
import copy
from kll.common.id import CapId
### Decorators ###
# Print Decorator Variables
ERROR = '\033[5;1;31mERROR\033[0m:'
WARNING = '\033[5;1;33mWARNING\033[0m:'
### Classes ###
class Expression:
'''
Container class for KLL expressions
'''
def __init__(self, lparam, operator, rparam, context):
'''
Initialize expression container
@param lparam: LOperatorData token
@param operator: Operator token
@param rparam: ROperatorData token
@param context: Parent context of expression
'''
# First stage/init
self.lparam_token = lparam
self.operator_token = operator
self.rparam_token = rparam
self.context = context # TODO, set multiple contexts for later stages
# Second stage
self.lparam_sub_tokens = []
self.rparam_sub_tokens = []
# BaseMap expression
self.base_map = False
# Default ConnectId
self.connect_id = 0
# Mutate class into the desired type
self.__class__ = {
'=>': NameAssociationExpression,
'<=': DataAssociationExpression,
'=': AssignmentExpression,
':': MapExpression,
}[self.operator_type()]
def operator_type(self):
'''
Determine which base operator this operator is of
All : (map) expressions are tokenized/parsed the same way
@return Base string representation of the operator
'''
if ':' in self.operator_token.value:
return ':'
return self.operator_token.value
def final_tokens(self, no_filter=False):
'''
Return the final list of tokens, must complete the second stage first
@param no_filter: If true, do not filter out Space tokens
@return Finalized list of tokens
'''
ret = self.lparam_sub_tokens + \
[self.operator_token] + self.rparam_sub_tokens
if not no_filter:
ret = [x for x in ret if x.type != 'Space']
return ret
def regen_str(self):
'''
Re-construct the string based off the original set of tokens
<lparam><operator><rparam>;
'''
return "{0}{1}{2};".format(
self.lparam_token.value,
self.operator_token.value,
self.rparam_token.value,
)
def point_chars(self, pos_list):
'''
Using the regenerated string, point to a given list of characters
Used to indicate where a possible issue/syntax error is
@param pos_list: List of character indices
i.e.
> U"A" : : U"1";
> ^
'''
out = "\t{0}\n\t".format(self.regen_str())
# Place a ^ character at the given locations
curpos = 1
for pos in sorted(pos_list):
# Pad spaces, then add a ^
out += ' ' * (pos - curpos)
out += '^'
curpos = pos + 1
return out
def rparam_start(self):
'''
Starting character position of rparam_token in a regen_str
'''
return len(self.lparam_token.value) + len(self.operator_token.value)
def __repr__(self):
# Build string representation based off of what has been set
# lparam, operator and rparam are always set
out = "Expression: {0}{1}{2}".format(
self.lparam_token.value,
self.operator_token.value,
self.rparam_token.value,
)
# TODO - Add more depending on what has been set
return out
def kllify(self):
'''
Returns KLL version of the expression
May not look like the original expression if simplification has taken place
'''
print(
"{0} kllify not defined for {1}".format(
WARNING,
self.__class__.__name__))
out = "{0}{1}{2};".format(
self.lparam_token.value,
self.operator_token.value,
self.rparam_token.value,
)
return out
def unique_keys(self):
'''
Generates a list of unique identifiers for the expression that is mergeable
with other functionally equivalent expressions.
This method should never get called directly as a generic Expression
'''
return [('UNKNOWN KEY', 'UNKNOWN EXPRESSION')]
class AssignmentExpression(Expression):
'''
Container class for assignment KLL expressions
'''
type = None
name = None
pos = None
value = None
## Setters ##
def array(self, name, pos, value):
'''
Assign array assignment parameters to expression
@param name: Name of variable
@param pos: Array position of the value (if None, overwrite the entire array)
@param value: Value of the array, if pos is specified, this is the value of an element
@return: True if parsing was successful
'''
self.type = 'Array'
self.name = name
self.pos = pos
self.value = value
# If pos is not none, flatten
if pos is not None:
self.value = "".join(str(x) for x in self.value)
return True
def merge_array(self, new_expression=None):
'''
Merge arrays, used for position assignments
Merges unconditionally, make sure this is what you want to do first
If no additional array is specified, just "cap-off" array.
This does a proper array expansion into a python list.
@param new_expression: AssignmentExpression type array, ignore if None
'''
# First, check if base expression needs to be capped
if self.pos is not None:
# Generate a new string array
new_value = [""] * self.pos
# Append the old contents to the list
new_value.append(self.value)
self.value = new_value
# Clear pos, to indicate that array has been capped
self.pos = None
# Next, if a new_expression has been specified, merge in
if new_expression is not None and new_expression.pos is not None:
# Check if we need to extend the list
new_size = new_expression.pos + 1 - len(self.value)
if new_size > 0:
self.value.extend([""] * new_size)
# Assign value to array
self.value[new_expression.pos] = new_expression.value
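# Example (illustrative): merging two single-element array assignments
#   base: "myArray[2] = c;" -> pos=2, value="c"    new: "myArray[0] = a;" -> pos=0, value="a"
# base.merge_array(new) first caps base into ["", "", "c"] (pos cleared to None),
# then writes the new element, leaving value == ["a", "", "c"].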
def variable(self, name, value):
'''
Assign variable assignment parameters to expression
@param name: Name of variable
@param value: Value of variable
@return: True if parsing was successful
'''
self.type = 'Variable'
self.name = name
self.value = value
# Flatten value, often a list of various token types
self.value = "".join(str(x) for x in self.value)
return True
def __repr__(self):
if self.type == 'Variable':
return "{0} = {1};".format(self.name, self.value)
elif self.type == 'Array':
# Output KLL style array, double quoted elements, space-separated
if isinstance(self.value, list):
output = "{0}[] =".format(self.name)
for value in self.value:
output += ' "{0}"'.format(value)
output += ";"
return output
# Single array assignment
else:
return "{0}[{1}] = {2};".format(
self.name, self.pos, self.value)
return "ASSIGNMENT UNKNOWN"
def kllify(self):
'''
Returns KLL version of the expression
May not look like the original expression if simplification has taken place
__repr__ is formatted correctly with assignment expressions
'''
return self.__repr__()
def unique_keys(self):
'''
Generates a list of unique identifiers for the expression that is mergeable
with other functionally equivalent expressions.
'''
return [(self.name, self)]
class NameAssociationExpression(Expression):
'''
Container class for name association KLL expressions
'''
type = None
name = None
association = None
## Setters ##
def capability(self, name, association, parameters):
'''
Assign a capability C function name association
@param name: Name of capability
@param association: Name of capability in target backend output
@return: True if parsing was successful
'''
self.type = 'Capability'
self.name = name
self.association = CapId(association, 'Definition', parameters)
return True
def define(self, name, association):
'''
Assign a define C define name association
@param name: Name of variable
@param association: Name of association in target backend output
@return: True if parsing was successful
'''
self.type = 'Define'
self.name = name
self.association = association
return True
def __repr__(self):
return "{0} <= {1};".format(self.name, self.association)
def kllify(self):
'''
Returns KLL version of the expression
May not look like the original expression if simplification has taken place
'''
return "{0}".format(self)
def unique_keys(self):
'''
Generates a list of unique identifiers for the expression that is mergeable
with other functionally equivalent expressions.
'''
return [(self.name, self)]
class DataAssociationExpression(Expression):
'''
Container class for data association KLL expressions
'''
type = None
association = None
value = None
## Setters ##
def animation(self, animations, animation_modifiers):
'''
Animation definition and configuration
@return: True if parsing was successful
'''
self.type = 'Animation'
self.association = animations
self.value = animation_modifiers
return True
def animationFrame(self, animation_frames, pixel_modifiers):
'''
Pixel composition of an Animation Frame
@return: True if parsing was successful
'''
self.type = 'AnimationFrame'
self.association = animation_frames
self.value = pixel_modifiers
return True
def pixelPosition(self, pixels, position):
'''
Pixel Positioning
@return: True if parsing was successful
'''
for pixel in pixels:
pixel.setPosition(position)
self.type = 'PixelPosition'
self.association = pixels
return True
def scanCodePosition(self, scancodes, position):
'''
Scan Code to Position Mapping
Note: Accepts lists of scan codes
Alone this isn't useful, but you can assign rows and columns using ranges instead of individually
@return: True if parsing was successful
'''
for scancode in scancodes:
scancode.setPosition(position)
self.type = 'ScanCodePosition'
self.association = scancodes
return True
def update(self, new_expression):
'''
Update expression
@param new_expression: Expression used to update this one
'''
supported = ['PixelPosition', 'ScanCodePosition']
if new_expression.type in supported:
for scancode in self.association:
scancode.updatePositions(new_expression.association[0])
def __repr__(self):
if self.type in ['PixelPosition', 'ScanCodePosition']:
output = ""
for index, association in enumerate(self.association):
if index > 0:
output += "; "
output += "{0}".format(association)
return "{0};".format(output)
return "{0} <= {1};".format(self.association, self.value)
def kllify(self):
'''
Returns KLL version of the expression
May not look like the original expression if simplification has taken place
__repr__ is formatted correctly with assignment expressions
'''
if self.type in ['PixelPosition', 'ScanCodePosition']:
output = ""
for index, association in enumerate(self.association):
if index > 0:
output += "; "
output += "{0}".format(association.kllify())
return "{0};".format(output)
if self.type in ['AnimationFrame']:
output = "{0} <= ".format(self.association[0].kllify())
for index, association in enumerate(self.value):
if index > 0:
output += ", "
output += "{0}".format(association[0].kllify())
return "{0};".format(output)
return "{0} <= {1};".format(
self.association.kllify(), self.value.kllify())
def unique_keys(self):
'''
Generates a list of unique identifiers for the expression that is mergeable
with other functionally equivalent expressions.
'''
keys = []
# Positions require a bit more introspection to get the unique keys
if self.type in ['PixelPosition', 'ScanCodePosition']:
for index, key in enumerate(self.association):
uniq_expr = self
# If there is more than one key, copy the expression
# and remove the non-related variants
if len(self.association) > 1:
uniq_expr = copy.copy(self)
# Isolate variant by index
uniq_expr.association = [uniq_expr.association[index]]
keys.append(("{0}".format(key.unique_key()), uniq_expr))
# AnimationFrames are already list of keys
# TODO Reorder frame assignments to dedup function equivalent mappings
elif self.type in ['AnimationFrame']:
for index, key in enumerate(self.association):
uniq_expr = self
# If there is more than one key, copy the expression
# and remove the non-related variants
if len(self.association) > 1:
uniq_expr = copy.copy(self)
# Isolate variant by index
uniq_expr.association = [uniq_expr.association[index]]
keys.append(("{0}".format(key), uniq_expr))
# Otherwise treat as a single element
else:
keys = [("{0}".format(self.association), self)]
# Remove any duplicate keys
# TODO Stat? Might be a neat report about how many duplicates were
# squashed
keys = list(set(keys))
return keys
class MapExpression(Expression):
'''
Container class for KLL map expressions
'''
type = None
triggers = None
operator = None
results = None
animation = None
animation_frame = None
pixels = None
position = None
trigger_identifiers = ['IndCode', 'GenericTrigger', 'Layer', 'LayerLock', 'LayerShift', 'LayerLatch', 'ScanCode']
def __init__(self, triggers, operator, results):
'''
Initialize MapExpression
Used when copying MapExpressions from different expressions
@param triggers: Sequence of combos of ranges of namedtuples
@param operator: Type of map operation
@param results: Sequence of combos of ranges of namedtuples
'''
self.type = 'TriggerCode'
self.triggers = triggers
self.operator = operator
self.results = results
self.connect_id = 0
## Setters ##
def triggerCode(self, triggers, operator, results):
'''
Trigger Code mapping
Takes in any combination of triggers and sets the expression accordingly.
@param triggers: Sequence of combos of ranges of namedtuples
@param operator: Type of map operation
@param results: Sequence of combos of ranges of namedtuples
@return: True if parsing was successful
'''
self.type = 'TriggerCode'
self.triggers = triggers
self.operator = operator
self.results = results
return True
def pixelChannels(self, pixelmap, trigger):
'''
Pixel Channel Composition
@return: True if parsing was successful
'''
self.type = 'PixelChannel'
self.pixel = pixelmap
self.position = trigger
return True
def triggersSequenceOfCombosOfIds(self, index=0):
'''
Takes triggers and converts into explicit ids
Only uses the first index by default.
@param index: Which trigger sequence to expand
@return: list of lists
Example (index=0)
[[[S10, S16], [S42]], [[S11, S16], [S42]]] -> [[10, 16], [42]]
'''
nsequence = []
for combo in self.triggers[index]:
ncombo = []
for identifier in combo:
ncombo.append(identifier.json())
nsequence.append(ncombo)
return nsequence
def resultsSequenceOfCombosOfIds(self, index=0):
'''
Takes results and converts into explicit capabilities
Only uses the first index by default.
@param index: Which result sequence to expand
@return: list of lists
'''
nsequence = []
for combo in self.results[index]:
ncombo = []
for identifier in combo:
ncombo.append(identifier.json())
nsequence.append(ncombo)
return nsequence
def sequencesOfCombosOfIds(self, expression_param):
'''
Prettified Sequence of Combos of Identifiers
@param expression_param: Trigger or Result parameter of an expression
Scan Code Example
[[[S10, S16], [S42]], [[S11, S16], [S42]]] -> (S10 + S16, S42)|(S11 + S16, S42)
'''
output = ""
# Sometimes during error cases, might be None
if expression_param is None:
return output
# Iterate over each trigger/result variants (expanded from ranges),
# each one is a sequence
for index, sequence in enumerate(expression_param):
if index > 0:
output += "|"
output += "("
# Iterate over each combo (element of the sequence)
for index, combo in enumerate(sequence):
if index > 0:
output += ", "
# Iterate over each trigger identifier
for index, identifier in enumerate(combo):
if index > 0:
output += " + "
output += "{0}".format(identifier)
output += ")"
return output
def sequencesOfCombosOfIds_kll(self, expression_param):
'''
Prettified Sequence of Combos of Identifiers, kll output edition
@param expression_param: Trigger or Result parameter of an expression
Scan Code Example
[[[S10, S16], [S42]], [[S11, S16], [S42]]] -> ['S10 + S16, S42', 'S11 + S16, S42']
'''
output = ['']
# Sometimes during error cases, might be None
if expression_param is None:
return output
# Iterate over each trigger/result variants (expanded from ranges),
# each one is a sequence
for index, sequence in enumerate(expression_param):
if index > 0:
output.append('')
# Iterate over each combo (element of the sequence)
for index, combo in enumerate(sequence):
if index > 0:
output[-1] += ", "
# Iterate over each trigger identifier
for index, identifier in enumerate(combo):
if index > 0:
output[-1] += " + "
output[-1] += "{0}".format(identifier.kllify())
return output
def trigger_id_list(self):
'''
Returns a list of ids within the sequence of combos
May contain duplicates
'''
id_list = []
# Iterate over each trigger/result variants (expanded from ranges)
for sequence in self.triggers:
# Iterate over each combo (element of the sequence)
for combo in sequence:
# Iterate over each trigger identifier
for identifier in combo:
id_list.append(identifier)
return id_list
def min_trigger_uid(self):
'''
Returns the min numerical uid
Used for trigger identifiers
'''
min_uid = 0xFFFF
# Iterate over list of identifiers in trigger
for identifier in self.trigger_id_list():
if identifier.type in self.trigger_identifiers and identifier.get_uid() < min_uid:
min_uid = identifier.get_uid()
return min_uid
def max_trigger_uid(self):
'''
Returns the max numerical uid
Used for trigger identifiers
'''
max_uid = 0
# Iterate over list of identifiers in trigger
for identifier in self.trigger_id_list():
if identifier.type in self.trigger_identifiers and identifier.get_uid() > max_uid:
max_uid = identifier.get_uid()
return max_uid
def add_trigger_uid_offset(self, offset):
'''
Adds a uid/scancode offset to all triggers
This is used when applying the connect_id interconnect offset during mapping indices generation
'''
# Iterate over list of identifiers in trigger
for identifier in self.trigger_id_list():
if identifier.type == 'ScanCode':
identifier.updated_uid = identifier.uid + offset
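# Example (illustrative): for an interconnect node whose scan codes start at offset 96,
# add_trigger_uid_offset(96) sets updated_uid on each ScanCode trigger (e.g. S5 -> 101)
# while the original uid is preserved for later reference.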
def elems(self):
'''
Return number of trigger and result elements
Useful for determining if this is a trigger macro (2+)
Should always return at least (1,1) unless it's an invalid calculation
@return: ( triggers, results )
'''
elems = [0, 0]
# XXX Needed?
if self.type == 'PixelChannel':
return tuple(elems)
# Iterate over each trigger variant (expanded from ranges), each one is
# a sequence
for sequence in self.triggers:
# Iterate over each combo (element of the sequence)
for combo in sequence:
# Just measure the size of the combo
elems[0] += len(combo)
# Iterate over each result variant (expanded from ranges), each one is
# a sequence
for sequence in self.results:
# Iterate over each combo (element of the sequence)
for combo in sequence:
# Just measure the size of the combo
elems[1] += len(combo)
return tuple(elems)
def trigger_str(self):
'''
String version of the trigger
Used for sorting
'''
# Pixel Channel Mapping doesn't follow the same pattern
if self.type == 'PixelChannel':
return "{0}".format(self.pixel)
return "{0}".format(
self.sequencesOfCombosOfIds(self.triggers),
)
def result_str(self):
'''
String version of the result
Used for sorting
'''
# Pixel Channel Mapping doesn't follow the same pattern
if self.type == 'PixelChannel':
return "{0}".format(self.position)
return "{0}".format(
self.sequencesOfCombosOfIds(self.results),
)
def __repr__(self):
# Pixel Channel Mapping doesn't follow the same pattern
if self.type == 'PixelChannel':
return "{0} : {1};".format(self.pixel, self.position)
return "{0} {1} {2};".format(
self.sequencesOfCombosOfIds(self.triggers),
self.operator,
self.sequencesOfCombosOfIds(self.results),
)
def sort_trigger(self):
'''
Returns sortable trigger
'''
if self.type == 'PixelChannel':
return "{0}".format(self.pixel.kllify())
return "{0}".format(
self.sequencesOfCombosOfIds_kll(self.triggers)[0],
)
def sort_result(self):
'''
Returns sortable result
'''
if self.type == 'PixelChannel':
result = self.position
# Handle None pixel mapping case
if isinstance(self.position, list):
result = self.sequencesOfCombosOfIds_kll(self.position)[0]
return "{0}".format(result)
return "{0}".format(
self.sequencesOfCombosOfIds_kll(self.results)[0],
)
def kllify(self):
'''
Returns KLL version of the expression
May not look like the original expression if simplification has taken place
'''
# TODO Handle variations? Instead of just the first index
if self.type == 'PixelChannel':
result = self.position
# Handle None pixel mapping case
if isinstance(self.position, list):
result = self.sequencesOfCombosOfIds_kll(self.position)[0]
return "{0} : {1};".format(self.pixel.kllify(), result)
return "{0} {1} {2};".format(
self.sequencesOfCombosOfIds_kll(self.triggers)[0],
self.operator,
self.sequencesOfCombosOfIds_kll(self.results)[0],
)
def unique_keys(self):
'''
Generates a list of unique identifiers for the expression that is mergeable
with other functionally equivalent expressions.
TODO: This function should re-order combinations to generate the key.
The final generated combo will be in the original order.
'''
keys = []
# Pixel Channel only has key per mapping
if self.type == 'PixelChannel':
keys = [("{0}".format(self.pixel), self)]
# Split up each of the keys
else:
# Iterate over each trigger/result variants (expanded from ranges),
# each one is a sequence
for index, sequence in enumerate(self.triggers):
key = ""
uniq_expr = self
# If there is more than one key, copy the expression
# and remove the non-related variants
if len(self.triggers) > 1:
uniq_expr = copy.copy(self)
# Isolate variant by index
uniq_expr.triggers = [uniq_expr.triggers[index]]
# Iterate over each combo (element of the sequence)
for index, combo in enumerate(sequence):
if index > 0:
key += ", "
# Iterate over each trigger identifier
for index, identifier in enumerate(combo):
if index > 0:
key += " + "
key += "{0} {1}".format(self.connect_id, identifier)
# Add key to list
keys.append((key, uniq_expr))
# Remove any duplicate keys
# TODO Stat? Might be a neat report about how many duplicates were
# squashed
keys = list(set(keys))
return keys
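# Example (illustrative): a ranged trigger such as S[0x0A-0x0B] : U"A"; arrives here as two
# trigger variants; unique_keys() copies the expression per variant and returns one
# (key, expression) pair each, letting functionally equivalent mappings from different
# files be merged or overridden by key.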
# File: kll/common/file.py
#!/usr/bin/env python3
'''
KLL File Container
'''
# Copyright (C) 2016-2018 by Jacob Alexander
#
# This file is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This file is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this file. If not, see <http://www.gnu.org/licenses/>.
### Imports ###
import os
import kll.common.context as context
### Decorators ###
# Print Decorator Variables
ERROR = '\033[5;1;31mERROR\033[0m:'
WARNING = '\033[5;1;33mWARNING\033[0m:'
### Classes ###
class KLLFile:
'''
Container class for imported KLL files
'''
def __init__(self, path, file_context):
'''
Initialize file container
@param path: Path to filename, if relative, relative to the execution environment
@param file_context: KLL Context object
'''
self.path = path
self.context = file_context
self.lines = []
self.data = ""
self.connect_id = None
# Add filename to context for debugging
self.context.kll_files.append(self.filename())
def __repr__(self):
context_str = type(self.context).__name__
# Show layer info if this is a PartialMap
if isinstance(self.context, context.PartialMapContext):
context_str = "{0}({1})".format(context_str, self.context.layer)
return "({}, {}, connect_id={})".format(
self.path,
context_str,
self.connect_id
)
def check(self):
'''
Make sure that the file exists at the initialized path
'''
exists = os.path.isfile(self.path)
# Display error message, will exit later
if not exists:
print("{0} {1} does not exist...".format(ERROR, self.path))
return exists
def filename(self):
filename = str(os.path.basename(self.path))
return filename
def read(self):
'''
Read the contents of the file path into memory
Reads both per line and complete copies
'''
try:
# Read file into memory, removing newlines
with open(self.path) as f:
self.data = f.read()
self.lines = self.data.splitlines()
except BaseException:
print(
"{0} Failed to read '{1}' into memory...".format(
ERROR, self.path))
return False
return True
def write(self, output_filename, debug=False):
'''
Writes the contents to a file
This can be useful for dumping processed files to disk
'''
try:
# Write the file to the specified file/folder
if debug:
print("Writing to {0}".format(output_filename))
directory = os.path.dirname(output_filename)
if not os.path.exists(directory):
os.makedirs(directory)
with open(output_filename, 'w') as f:
f.write(self.data)
except BaseException:
print("{0} Failed to write to file '{1}'".format(ERROR, output_filename))
return False
return True
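# Example (illustrative): loading a kll file under a BaseMap context
#   kll_file = KLLFile('scan_map.kll', context.BaseMapContext())
#   if kll_file.check() and kll_file.read():
#       pass  # kll_file.lines / kll_file.data now hold the file contents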
# File: kll/common/id.py
#!/usr/bin/env python3
'''
KLL Id Containers
'''
# Copyright (C) 2016-2018 by Jacob Alexander
#
# This file is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This file is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this file. If not, see <http://www.gnu.org/licenses/>.
### Imports ###
from kll.common.channel import ChannelList
from kll.common.modifier import AnimationModifierList, PixelModifierList
from kll.common.position import Position
from kll.common.schedule import Schedule
from kll.extern.funcparserlib.parser import NoParseError
### Decorators ###
# Print Decorator Variables
ERROR = '\033[5;1;31mERROR\033[0m:'
WARNING = '\033[5;1;33mWARNING\033[0m:'
### Classes ###
class Id:
'''
Base container class for various KLL types
'''
def __init__(self):
self.type = None
self.uid = None
def get_uid(self):
'''
Some Id types have alternate uid mappings
self.uid stores the original uid whereas it may be updated due to multi-node configurations
'''
return self.uid
def json(self):
'''
JSON representation of Id
Generally each specialization of the Id class will need to enhance this function.
'''
return {
'type' : self.type,
'uid' : self.uid,
}
def kllify(self):
'''
Returns KLL version of the Id
In most cases we can just use the string representation of the object
'''
return "{0}".format(self)
class HIDId(Id, Schedule):
'''
HID/USB identifier container class
'''
secondary_types = {
'USBCode': 'USB',
'SysCode': 'SYS',
'ConsCode': 'CONS',
'IndCode': 'IND',
}
kll_types = {
'USBCode': 'U',
'SysCode': 'SYS',
'ConsCode': 'CONS',
'IndCode': 'I',
}
type_width = {
'USBCode': 1,
'SysCode': 1,
'ConsCode': 2,
'IndCode': 1,
}
type_locale = {
'USBCode': 'to_hid_keyboard',
'SysCode': 'to_hid_sysctrl',
'ConsCode': 'to_hid_consumer',
'IndCode': 'to_hid_led',
}
def __init__(self, type, uid, locale):
'''
@param type: String type of the Id
@param uid: Unique integer identifier for the Id
@param locale: Locale layout object used to decode the uid
'''
Id.__init__(self)
Schedule.__init__(self)
self.type = type
self.uid = uid
self.locale = locale
self.locale_type = self.type_locale[self.type]
# Set secondary type
self.second_type = self.secondary_types[self.type]
# Set kll type
self.kll_type = self.kll_types[self.type]
# Determine hex_str padding
self.padding = 2
if self.type == 'ConsCode':
self.padding = 3
# Validate uid is in locale based on what type of HID field it is
if self.hex_str() not in self.locale.json()[self.locale_type].keys():
print("{} Unknown HID({}) UID('{}') in locale '{}'".format(
WARNING,
self.type,
self.uid,
self.locale.name()
))
def hex_str(self):
'''
Returns hex string used by locale for uid lookup
'''
return "0x{0:0{1}X}".format(self.uid, self.padding)
def get_hex_str(self):
'''
Returns hex string used by locale for uid lookup, uses self.get_uid() instead of self.uid
'''
return "0x{0:0{1}X}".format(self.get_uid(), self.padding)
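# Example (illustrative; 'locale' is a layouts locale object): padding follows the usage width
#   HIDId('USBCode', 0x04, locale).hex_str()   # -> "0x04"  (1-byte usage, e.g. 'A')
#   HIDId('ConsCode', 0x192, locale).hex_str() # -> "0x192" (2-byte Consumer usage)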
def width(self):
'''
Returns the bit width of the HIDId
This is the maximum number of bytes required for each type of HIDId as per the USB spec.
Generally this is just 1 byte; however, Consumer elements (ConsCode) require 2 bytes.
'''
return self.type_width[self.type]
def __repr__(self):
'''
Use string name instead of integer, easier to debug
'''
try:
name = self.locale.json()[self.locale_type][self.hex_str()]
schedule = self.strSchedule()
if len(schedule) > 0:
schedule = "({0})".format(schedule)
output = 'HID({},{})"{}"{}{}'.format(self.type, self.locale.name(), self.uid, name, schedule)
return output
except:
print("{} '{}' is an invalid dictionary lookup.".format(
WARNING,
(self.second_type, self.uid),
))
return ""
def json(self):
'''
JSON representation of HIDId
'''
output = Id.json(self)
output.update(Schedule.json(self))
return output
def kllify(self):
'''
Returns KLL version of the Id
'''
schedule = self.strSchedule()
if len(schedule) > 0:
schedule = "({0})".format(schedule)
output = "{0}{1:#05x}{2}".format(self.kll_type, self.uid, schedule)
return output
class ScanCodeId(Id, Schedule, Position):
'''
Scan Code identifier container class
'''
def __init__(self, uid):
Id.__init__(self)
Schedule.__init__(self)
Position.__init__(self)
self.type = 'ScanCode'
self.uid = uid
# This uid is used for any post-processing of the uid
# The original uid is maintained in case it is needed
self.updated_uid = None
def inferred_type(self):
'''
Always returns ScanCode (simplifies code when mixed with PixelAddressId)
'''
return 'PixelAddressId_ScanCode'
def get_uid(self):
'''
Determine uid
May have been updated due to connect_id setting for interconnect offsets
'''
uid = self.uid
if self.updated_uid is not None:
uid = self.updated_uid
return uid
def uid_set(self):
'''
Returns a tuple of uids, always a single element for ScanCodeId
'''
return tuple([self.get_uid()])
def unique_key(self):
'''
Returns the key string used for datastructure sorting
'''
# Positions are a special case
if self.positionSet():
return "S{0:03d}".format(self.get_uid())
def __repr__(self):
# Positions are a special case
if self.positionSet():
return "{0} <= {1}".format(self.unique_key(), self.strPosition())
schedule = self.strSchedule()
if len(schedule) > 0:
return "S{0:03d}({1})".format(self.get_uid(), schedule)
else:
return "S{0:03d}".format(self.get_uid())
def json(self):
'''
JSON representation of ScanCodeId
'''
output = Id.json(self)
output.update(Schedule.json(self))
output.update(Position.json(self))
return output
def kllify(self):
'''
Returns KLL version of the Id
'''
schedule = self.strSchedule()
if len(schedule) > 0:
schedule = "({0})".format(schedule)
output = "S{0:#05x}{1}".format(self.get_uid(), schedule)
# Position enabled
if self.isPositionSet():
output += " <= {0}".format(self.strPosition())
return output
class LayerId(Id, Schedule):
'''
Layer identifier container class
'''
def __init__(self, type, layer):
Id.__init__(self)
Schedule.__init__(self)
self.type = type
self.uid = layer
def __repr__(self):
schedule = self.strSchedule()
if len(schedule) > 0:
return "{0}[{1}]({2})".format(
self.type,
self.uid,
schedule,
)
else:
return "{0}[{1}]".format(
self.type,
self.uid,
)
def width(self):
'''
Returns the bit width of the LayerId
This is currently 2 bytes.
'''
return 2
def json(self):
'''
JSON representation of LayerId
'''
output = Id.json(self)
output.update(Schedule.json(self))
return output
def kllify(self):
'''
Returns KLL version of the Id
'''
# The string __repr__ is KLL in this case
return str(self)
class TriggerId(Id, Schedule):
'''
Generic trigger identifier container class
'''
def __init__(self, idcode, uid):
Id.__init__(self)
Schedule.__init__(self)
self.type = 'GenericTrigger'
self.uid = uid
self.idcode = idcode
def __repr__(self):
schedule = self.strSchedule()
schedule_val = ""
if len(schedule) > 0:
schedule_val = "({})".format(schedule)
return "T[{0},{1}]{2}".format(
self.idcode,
self.uid,
schedule_val,
)
def json(self):
'''
JSON representation of TriggerId
'''
output = Id.json(self)
output.update(Schedule.json(self))
return output
def kllify(self):
'''
Returns KLL version of the Id
'''
# The string __repr__ is KLL in this case
return str(self)
class AnimationId(Id, Schedule, AnimationModifierList):
'''
Animation identifier container class
'''
name = None
def __init__(self, name, state=None):
Id.__init__(self)
Schedule.__init__(self)
AnimationModifierList.__init__(self)
self.name = name
self.type = 'Animation'
self.second_type = 'A'
self.state = state
def __repr__(self):
state = ""
if self.state is not None:
state = ", {}".format(self.state)
schedule = self.strSchedule()
if len(schedule) > 0:
return "A[{0}{1}]({2})".format(self.name, state, self.strSchedule())
if len(self.modifiers) > 0:
return "A[{0}{1}]({2})".format(self.name, state, self.strModifiers())
return self.base_repr()
def base_repr(self):
'''
Returns string of just the identifier, exclude animation modifiers
'''
state = ""
if self.state is not None:
state = ", {}".format(self.state)
return "A[{0}{1}]".format(self.name, state)
def width(self):
'''
Returns the bit width of the AnimationId
This is currently 2 bytes.
'''
return 2
def json(self):
'''
JSON representation of AnimationId
'''
output = Id.json(self)
output.update(AnimationModifierList.json(self))
output.update(Schedule.json(self))
output['name'] = self.name
output['setting'] = "{}".format(self)
output['state'] = self.state
del output['uid']
return output
class AnimationFrameId(Id, AnimationModifierList):
'''
Animation Frame identifier container class
'''
def __init__(self, name, index):
Id.__init__(self)
AnimationModifierList.__init__(self)
self.name = name
self.index = index
self.type = 'AnimationFrame'
def __repr__(self):
return "AF[{0}, {1}]".format(self.name, self.index)
def kllify(self):
'''
Returns KLL version of the Id
'''
return "A[{0}, {1}]".format(self.name, self.index)
class PixelId(Id, Position, PixelModifierList, ChannelList):
'''
Pixel identifier container class
'''
def __init__(self, uid):
Id.__init__(self)
Position.__init__(self)
PixelModifierList.__init__(self)
ChannelList.__init__(self)
self.uid = uid
self.type = 'Pixel'
def unique_key(self, kll=False):
'''
Returns the key string used for datastructure sorting
@param kll: Kll output format mode
'''
if isinstance(self.uid, HIDId) or isinstance(self.uid, ScanCodeId):
if kll:
return "{0}".format(self.uid.kllify())
return "P[{0}]".format(self.uid)
if isinstance(self.uid, PixelAddressId):
if kll:
return "P[{0}]".format(self.uid.kllify())
return "P[{0}]".format(self.uid)
if kll:
return "P{0:#05x}".format(self.uid)
return "P{0}".format(self.uid)
def __repr__(self):
# Positions are a special case
if self.positionSet():
return "{0} <= {1}".format(self.unique_key(), self.strPosition())
extra = ""
if len(self.modifiers) > 0:
extra += "({0})".format(self.strModifiers())
if len(self.channels) > 0:
extra += "({0})".format(self.strChannels())
return "{0}{1}".format(self.unique_key(), extra)
def kllify(self):
'''
KLL syntax compatible output for Pixel object
'''
# Positions are a special case
if self.positionSet():
return "{0} <= {1}".format(self.unique_key(kll=True), self.strPosition())
extra = ""
if len(self.modifiers) > 0:
extra += "({0})".format(self.strModifiers())
if len(self.channels) > 0:
extra += "({0})".format(self.strChannels())
return "{0}{1}".format(self.unique_key(kll=True), extra)
class PixelAddressId(Id):
'''
Pixel address identifier container class
'''
def __init__(self, index=None, col=None, row=None, relCol=None, relRow=None):
Id.__init__(self)
# Check to make sure index, col or row is set
if index is None and col is None and row is None and relRow is None and relCol is None:
print("{0} index, col or row must be set".format(ERROR))
self.index = index
self.col = col
self.row = row
self.relCol = relCol
self.relRow = relRow
self.type = 'PixelAddress'
def inferred_type(self):
'''
Determine which PixelAddressType based on set values
'''
if self.index is not None:
return 'PixelAddressId_Index'
if self.col is not None and self.row is None:
return 'PixelAddressId_ColumnFill'
if self.col is None and self.row is not None:
return 'PixelAddressId_RowFill'
if self.col is not None and self.row is not None:
return 'PixelAddressId_Rect'
if self.relCol is not None and self.relRow is None:
return 'PixelAddressId_RelativeColumnFill'
if self.relCol is None and self.relRow is not None:
return 'PixelAddressId_RelativeRowFill'
if self.relCol is not None and self.relRow is not None:
return 'PixelAddressId_RelativeRect'
print("{0} Unknown PixelAddressId, this is a bug!".format(ERROR))
return ""
def uid_set(self):
'''
Returns a tuple of uids, depending on what has been set.
'''
if self.index is not None:
return tuple([self.index])
if self.col is not None and self.row is None:
return tuple([self.col, self.row])
if self.col is None and self.row is not None:
return tuple([self.col, self.row])
if self.col is not None and self.row is not None:
return tuple([self.col, self.row])
if self.relCol is not None and self.relRow is None:
return tuple([self.relCol, self.relRow])
if self.relCol is None and self.relRow is not None:
return tuple([self.relCol, self.relRow])
if self.relCol is not None and self.relRow is not None:
return tuple([self.relCol, self.relRow])
print("{0} Unknown uid set, this is a bug!".format(ERROR))
return "= 0 and "+" or ""
cur_out += "{0}".format(self.valueStr(self.relRow))
output.append(cur_out)
if not self.relCol is None:
cur_out = "c:i"
cur_out += self.relCol >= 0 and "+" or ""
cur_out += "{0}".format(self.valueStr(self.relCol))
output.append(cur_out)
return output
def __repr__(self):
return "{0}".format(self.outputStrList())
def kllify(self):
'''
KLL syntax compatible output for PixelAddress object
'''
return ",".join(self.outputStrList())
class PixelLayerId(Id, PixelModifierList):
'''
Pixel Layer identifier container class
'''
def __init__(self, uid):
Id.__init__(self)
PixelModifierList.__init__(self)
self.uid = uid
self.type = 'PixelLayer'
def __repr__(self):
if len(self.modifiers) > 0:
return "PL{0}({1})".format(self.uid, self.strModifiers())
return "PL{0}".format(self.uid)
class CapId(Id):
'''
Capability identifier
'''
def __init__(self, name, type, arg_list=[]):
'''
@param name: Name of capability
@param type: Type of capability definition, string
@param arg_list: List of CapArgIds, empty list if there are none
'''
Id.__init__(self)
self.name = name
self.type = type
self.arg_list = arg_list
def __repr__(self):
# Generate prettified argument list
arg_string = ""
for arg in self.arg_list:
arg_string += "{0},".format(arg)
if len(arg_string) > 0:
arg_string = arg_string[:-1]
return "{0}({1})".format(self.name, arg_string)
def json(self):
'''
JSON representation of CapId
'''
return {
'type' : self.type,
'name' : self.name,
'args' : [arg.json() for arg in self.arg_list]
}
def total_arg_bytes(self, capabilities_dict=None):
'''
Calculate the total number of bytes needed for the args
@param capabilities_dict: Dictionary of capabilities used, just in case no widths have been assigned
@return: Number of bytes
'''
# Zero if no args
total_bytes = 0
for index, arg in enumerate(self.arg_list):
# Lookup actual width if necessary (wasn't set explicitly)
if capabilities_dict is not None and (arg.type == 'CapArgValue' or arg.width is None):
# Check if there are enough arguments
expected = len(capabilities_dict[self.name].association.arg_list)
got = len(self.arg_list)
if got != expected:
print("{0} incorrect number of arguments for {1}. Expected {2} Got {3}".format(
ERROR,
self,
expected,
got,
))
print("\t{0}".format(capabilities_dict[self.name].kllify()))
raise AssertionError("Invalid arguments")
total_bytes += capabilities_dict[self.name].association.arg_list[index].width
# Otherwise use the set width
else:
total_bytes += arg.width
return total_bytes
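# Usage sketch (hypothetical capability name, type string and widths); shows how
# CapId renders and how total_arg_bytes() sums explicit argument widths:
#   arg_list = [CapArgId('usbCode', 1), CapArgId('state', 1)]
#   cap = CapId('consumerCtrlOut', 'Definition', arg_list)
#   repr(cap)              -> consumerCtrlOut(usbCode:1,state:1)
#   cap.total_arg_bytes()  -> 2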
class NoneId(CapId):
'''
None identifier
It's just a capability...that does nothing (instead of inferring to do something else)
'''
def __init__(self):
super().__init__('None', 'None')
def json(self):
'''
JSON representation of NoneId
'''
return {
'type' : self.type,
}
def __repr__(self):
return "None"
class CapArgId(Id):
'''
Capability Argument identifier
'''
def __init__(self, name, width=None):
'''
@param name: Name of argument
@param width: Byte-width of the argument, if None, this is not part of a capability definition
'''
Id.__init__(self)
self.name = name
self.width = width
self.type = 'CapArg'
def __repr__(self):
if self.width is None:
return "{0}".format(self.name)
else:
return "{0}:{1}".format(self.name, self.width)
def json(self):
'''
JSON representation of CapArgId
'''
return {
'name' : self.name,
'width' : self.width,
'type' : self.type,
}
class CapArgValue(Id):
'''
Capability Argument Value identifier
'''
def __init__(self, value):
'''
@param value: Value of argument
'''
Id.__init__(self)
self.value = value
self.type = 'CapArgValue'
def __repr__(self):
return "{}".format(self.value)
def json(self):
'''
JSON representation of CapArgValue
'''
return {
'value' : self.value,
'type' : self.type,
}
kll/common/modifier.py
#!/usr/bin/env python3
'''
KLL Modifier Containers
'''
# Copyright (C) 2016-2018 by Jacob Alexander
#
# This file is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This file is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this file. If not, see <http://www.gnu.org/licenses/>.
### Imports ###
### Decorators ###
# Print Decorator Variables
ERROR = '\033[5;1;31mERROR\033[0m:'
WARNING = '\033[5;1;33mWARNING\033[0m:'
### Classes ###
class AnimationModifierArg:
'''
Animation modification arg container class
'''
def __init__(self, parent, value):
self.parent = parent
self.arg = value
self.subarg = None
# In case we have a bad modifier, arg is set to None
if self.arg is None:
return
# Sub-arg case
if isinstance(value, tuple):
self.arg = value[0]
self.subarg = value[1]
# Validate arg
validation = parent.valid_modifiers[parent.name]
if isinstance(validation, dict):
# arg
if self.arg not in validation.keys():
print("{0} '{1}' is not a valid modifier arg for '{2}'".format(
ERROR,
self.arg,
parent.name,
))
# subarg
subvalidation = validation[self.arg]
if subvalidation is None and self.subarg is not None:
print("{0} '{1}' is an incorrect subargument for '{2}:{3}', should be a '{4}'".format(
ERROR,
self.subarg,
parent.name,
self.arg,
subvalidation,
))
elif subvalidation is not None and not isinstance(self.subarg, subvalidation):
print("{0} '{1}' is an incorrect subargument for '{2}:{3}', should be a '{4}'".format(
ERROR,
self.subarg,
parent.name,
self.arg,
subvalidation,
))
else:
# arg
if not isinstance(self.arg, validation):
print("{0} '{1}' is an incorrect argument for '{2}', should be a '{3}'".format(
ERROR,
self.arg,
parent.name,
validation,
))
def __repr__(self):
if self.arg is None:
return ""
if self.subarg is not None:
int_list = ["{0}".format(x) for x in self.subarg]
return "{0}({1})".format(
self.arg,
",".join(int_list),
)
return "{0}".format(self.arg)
def like(self, other):
'''
Returns true if the other AnimationModifierArg is the same
'''
if self.arg != other.arg:
return False
if self.subarg is None and other.subarg is None:
return True
elif self.subarg is None or other.subarg is None:
return False
if frozenset(self.subarg) == frozenset(other.subarg):
return True
return False
def json(self):
'''
JSON representation of AnimationModifierArg
'''
return {
'arg': self.arg,
'subarg': self.subarg,
}
def kllify(self):
'''
Returns KLL version of the Modifier
In most cases we can just use the string representation of the object
'''
return "{0}".format(self)
class AnimationModifier:
'''
Animation modification container class
'''
# Modifier validation tree
valid_modifiers = {
'loops': int,
'loop': None,
'framedelay': int,
'framestretch': None,
'start': None,
'pause': None,
'stop': None,
'single': None,
'pos': int,
'pfunc': {
'off': None,
'interp': None,
'kllinterp': None,
},
'ffunc': {
'off': None,
'interp': None,
'kllinterp': None,
},
'replace': {
'stack': None,
'basic': None,
'all': None,
'state': None,
'clear': None,
},
}
def __init__(self, name, value=None):
# Check if name is valid
if name not in self.valid_modifiers.keys():
print("{0} '{1}' is not a valid modifier {1}:{2}".format(
ERROR,
name,
value,
))
self.name = ''
self.value = AnimationModifierArg(self, None)
return
self.name = name
self.value = AnimationModifierArg(self, value)
def __repr__(self):
if self.value.arg is None:
return "{0}".format(self.name)
return "{0}:{1}".format(self.name, self.value)
def like(self, other):
'''
Returns true if AnimationModifier has the same name
'''
return other.name == self.name
def __eq__(self, other):
return self.like(other) and self.value.like(other.value)
def json(self):
'''
JSON representation of AnimationModifier
'''
# Determine json of self.value
value = None
if self.value is not None:
value = self.value.json()
return {
'name': self.name,
'value': value,
}
def kllify(self):
'''
Returns KLL version of the Modifier
In most cases we can just use the string representation of the object
'''
return "{0}".format(self)
class AnimationModifierList:
'''
Animation modification container list class
Contains a list of modifiers, the order does not matter
'''
frameoption_modifiers = [
'framestretch',
]
def __init__(self):
self.modifiers = []
def setModifiers(self, modifier_list):
'''
Apply modifiers to Animation
'''
for modifier in modifier_list:
self.modifiers.append(AnimationModifier(modifier[0], modifier[1]))
def clean(self, new_modifier, new, old):
'''
Check for a conflicting modifier
Returns False if new_modifier is of type 'new' and a modifier of type 'old' already exists
'''
if new_modifier.name == new:
for index, modifier in enumerate(self.modifiers):
if modifier.name == old:
return False
return True
def replace(self, new_modifier):
'''
Replace modifier
If it doesn't exist already, just add it.
'''
# If new_modifier is loops and loop exists, remove loop
if not self.clean(new_modifier, 'loops', 'loop'):
return
# If new_modifier is loop and loops exists, remove loops
if not self.clean(new_modifier, 'loop', 'loops'):
return
# Check for modifier
for modifier in self.modifiers:
if modifier.name == new_modifier.name:
modifier.value = new_modifier.value
return
# Otherwise just add it
self.modifiers.append(new_modifier)
def getModifier(self, name):
'''
Retrieves modifier
Returns False if it doesn't exist
Returns the argument if it exists (may be None)
'''
for mod in self.modifiers:
if mod.name == name:
return mod.value
return False
def strModifiers(self):
'''
__repr__ of the AnimationModifierList when multiple inheritance is used
'''
output = ""
for index, modifier in enumerate(sorted(self.modifiers, key=lambda x: x.name)):
if index > 0:
output += ","
output += "{0}".format(modifier)
return output
def __repr__(self):
return self.strModifiers()
def json(self):
'''
JSON representation of AnimationModifierList
'''
output = {
'modifiers' : [],
}
# Output sorted list of modifiers
for modifier in sorted(self.modifiers, key=lambda x: x.name):
output['modifiers'].append(modifier.json())
# Look for any frameoption modifiers
frameoption_list = []
for modifier in self.modifiers:
if modifier.name in self.frameoption_modifiers:
frameoption_list.append(modifier.name)
output['frameoptions'] = frameoption_list
return output
def kllify(self):
'''
Returns KLL version of the ModifierList
In most cases we can just use the string representation of the object
'''
return "{0}".format(self)
class PixelModifier:
'''
Pixel modification container class
'''
def __init__(self, operator, value):
self.operator = operator
self.value = value
def __repr__(self):
if self.operator is None:
return "{0}".format(self.value)
return "{0}{1}".format(self.operator, self.value)
def operator_type(self):
'''
Returns operator type
'''
types = {
None: 'Set',
'+': 'Add',
'-': 'Subtract',
'+:': 'NoRoll_Add',
'-:': 'NoRoll_Subtract',
'<<': 'LeftShift',
'>>': 'RightShift',
}
return types[self.operator]
def kllify(self):
'''
Returns KLL version of the PixelModifier
In most cases we can just use the string representation of the object
'''
return "{0}".format(self)
class PixelModifierList:
'''
Pixel modification container list class
Contains a list of modifiers
Index 0 corresponds to pixel 0
'''
def __init__(self):
self.modifiers = []
def setModifiers(self, modifier_list):
'''
Apply modifier to each pixel channel
'''
for modifier in modifier_list:
self.modifiers.append(PixelModifier(modifier[0], modifier[1]))
def strModifiers(self):
'''
__repr__ of the PixelModifierList when multiple inheritance is used
'''
output = ""
for index, modifier in enumerate(self.modifiers):
if index > 0:
output += ","
output += "{0}".format(modifier)
return output
def __repr__(self):
return self.strModifiers()
def kllify(self):
'''
Returns KLL version of the PixelModifierList
In most cases we can just use the string representation of the object
'''
return "{0}".format(self)
kll/common/organization.py
#!/usr/bin/env python3
'''
KLL Data Organization
'''
# Copyright (C) 2016-2018 by Jacob Alexander
#
# This file is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This file is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this file. If not, see <http://www.gnu.org/licenses/>.
### Imports ###
import copy
import re
import kll.common.expression as expression
from kll.common.id import PixelAddressId
### Decorators ###
# Print Decorator Variables
ERROR = '\033[5;1;31mERROR\033[0m:'
WARNING = '\033[5;1;33mWARNING\033[0m:'
ansi_escape = re.compile(r'\x1b[^m]*m')
### Classes ###
class Data:
'''
Base class for KLL datastructures
'''
# Debug output formatters
debug_output = {
'add': "\t\033[1;42;37m++\033[0m\033[1mADD KEY\033[1;42;37m++\033[0m \033[1m<==\033[0m {0}",
'app': "\t\033[1;45;37m**\033[0m\033[1mAPP KEY\033[1;45;37m**\033[0m \033[1m<==\033[0m {0}",
'mod': "\t\033[1;44;37m##\033[0m\033[1mMOD KEY\033[1;44;37m##\033[0m \033[1m<==\033[0m {0}",
'rem': "\t\033[1;41;37m--\033[0m\033[1mREM KEY\033[1;41;37m--\033[0m \033[1m<==\033[0m {0}",
'drp': "\t\033[1;43;37m@@\033[0m\033[1mDRP KEY\033[1;43;37m@@\033[0m \033[1m<==\033[0m {0}",
'dup': "\t\033[1;46;37m!!\033[0m\033[1mDUP KEY\033[1;46;37m!!\033[0m \033[1m<==\033[0m {0}",
}
def __init__(self, parent):
'''
Initialize datastructure
@param parent: Parent organization, used to query data from other datastructures
'''
self.data = {}
self.parent = parent
self.connect_id = 0
self.merge_in_log = []
def merge_in_log_prune(self, debug):
'''
Prune the merge_in_log
Searches the list in reverse; if a key has already been seen, the older entry is disabled
'''
new_log = []
found = []
# We have to manually reverse, then modify the referenced items
# i.e. we're still modifying self.merge_in_log
# This is done so we still have a proper index, and do the invalidation in the correct order
reversed_log = self.merge_in_log[::-1]
for index, elem in enumerate(reversed_log):
key, expr, enabled = elem
# Add to found list
if key not in found:
found.append(key)
new_log.insert(0, elem)
# Otherwise mark as disabled
else:
reversed_log[index] = [
key,
expr,
False,
]
return new_log
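# Pruning sketch (hypothetical keys 'S1'/'S2'); merge_in_log entries are
# [key, expression, enabled] triplets (most recent last), and only the most
# recent entry per key survives, preserving relative order:
#   [['S1', e1, True], ['S2', e2, True], ['S1', e3, True]]
#     -> [['S2', e2, True], ['S1', e3, True]]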
def merge_in_log_expression(self, key, expression, debug):
'''
Logs a given merge_in expression
This is used to determine the order in which the merges occurred
Duplicate entries are pruned after the merge
@param key: Hash entry for the expression (text)
@param expression: Expression object
@param debug: Enable debug output
'''
# Debug output
if debug[0]:
output = "{0} Log Add: {1} {2}".format(
self.parent.parent.layer_info(),
key,
expression,
)
print(debug[1] and output or ansi_escape.sub('', output))
# Add to log, and enable key
self.merge_in_log.append([key, expression, True])
def add_expression(self, expression, debug):
'''
Add expression to data structure
May have multiple keys to add for a given expression
@param expression: KLL Expression (fully tokenized and parsed)
@param debug: Enable debug output
'''
# Lookup unique keys for expression
keys = expression.unique_keys()
# Add/Modify expressions in datastructure
for key, uniq_expr in keys:
# Check which operation we are trying to do, add or modify
if debug[0]:
if key in self.data.keys():
output = self.debug_output['mod'].format(key)
else:
output = self.debug_output['add'].format(key)
print(debug[1] and output or ansi_escape.sub('', output))
self.data[key] = uniq_expr
# Add to log
self.merge_in_log_expression(key, uniq_expr, debug)
def merge(self, merge_in, map_type, debug):
'''
Merge in the given datastructure to this datastructure
This datastructure serves as the base.
@param merge_in: Data structure from another organization to merge into this one
@param map_type: Used for map specific merges
@param debug: Enable debug output
'''
# The default case is just to add the expression in directly
for key, kll_expression in merge_in.data.items():
# Set ConnectId in expression
kll_expression.connect_id = merge_in.connect_id
# Display key:expression being merged in
if debug[0]:
output = merge_in.elem_str(key, True)
print(debug[1] and output or ansi_escape.sub('', output), end="")
self.add_expression(kll_expression, debug)
# Add to log
self.merge_in_log_expression(key, kll_expression, debug)
def reduction(self, debug=False):
'''
Simplifies datastructure
Most of the datastructures don't have a reduction. Just do nothing in this case.
'''
pass
def cleanup(self, debug=False):
'''
Post-processing step for merges that may need to remove some data in the organization.
Mainly used for dropping BaseMapContext expressions after generating a PartialMapContext.
'''
pass
def connectid(self, connect_id):
'''
Sets the Data store with a given connect_id
By default, this is 0, but may be set prior to an organization merge
'''
self.connect_id = connect_id
def elem_str(self, key, single=False):
'''
Debug output for a single element
@param key: Index to datastructure
@param single: Setting to True will bold the key
'''
if single:
return "\033[1;33m{0: <20}\033[0m \033[1;36;41m>\033[0m {1}\n".format(key, self.data[key])
else:
return "{0: <20} \033[1;36;41m>\033[0m {1}\n".format(key, self.data[key])
def __repr__(self):
output = ""
# Display sorted list of keys, along with the internal value
for key in sorted(self.data):
output += self.elem_str(key)
return output
class MappingData(Data):
'''
KLL datastructure for data mapping
ScanCode trigger -> result
USBCode trigger -> result
Animation trigger -> result
'''
def add_expression(self, expression, debug):
'''
Add expression to data structure
May have multiple keys to add for a given expression
Map expressions insert into the datastructure according to their operator.
+Operators+
: Add/Modify
:+ Append
:- Remove
:: Lazy Add/Modify
i: Add/Modify
i:+ Append
i:- Remove
i:: Lazy Add/Modify
The i or isolation operators are stored separately from the main ones.
Each key is pre-pended with an i
The :: or lazy operators act just like : operators, except that they will be ignored if the evaluation
merge cannot resolve a ScanCode.
@param expression: KLL Expression (fully tokenized and parsed)
@param debug: Enable debug output
'''
# Lookup unique keys for expression
keys = expression.unique_keys()
# Add/Modify expressions in datastructure
for ukey, uniq_expr in keys:
# Determine the expression operator
operator = expression.operator
# Except for the : operator, all others have delayed action
# Meaning, they change behaviour depending on how Contexts are merged
# This means we can't simplify yet
# In addition, :+ and :- are stackable, which means each key has a list of expressions
# We prepend the operator to differentiate between the different types of delayed operations
key = "{0}{1}".format(operator, ukey)
# Determine if key exists already
exists = key in self.data.keys()
# Add/Modify
if operator in [':', '::', 'i:', 'i::']:
debug_tag = exists and 'mod' or 'add'
# Append/Remove
else:
# Check to make sure we haven't already appended expression
# Use the string representation to do the comparison (general purpose)
if exists and "{0}".format(uniq_expr) in ["{0}".format(elem) for elem in self.data[key]]:
debug_tag = 'dup'
# Append
elif operator in [':+', 'i:+']:
debug_tag = 'app'
# Remove
else:
debug_tag = 'rem'
# Debug output
if debug[0]:
output = self.debug_output[debug_tag].format(key)
print(debug[1] and output or ansi_escape.sub('', output))
# Don't append if a duplicate
if debug_tag == 'dup':
continue
# Append, rather than replace
if operator in [':+', ':-', 'i:+', 'i:-']:
if exists:
self.data[key].append(uniq_expr)
# Create initial list
else:
self.data[key] = [uniq_expr]
else:
self.data[key] = [uniq_expr]
# Append to log
self.merge_in_log_expression(key, uniq_expr, debug)
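# Key construction sketch (hypothetical ScanCode trigger keys); the operator is
# prepended to the expression's unique key so that delayed operations on the
# same trigger are kept separate until merge/reduction:
#   S0x10 :  U"A";  -> data[":S0x10"]   = [expr]        (set/replace)
#   S0x10 :+ U"B";  -> data[":+S0x10"]  = [expr, ...]   (stackable append)
#   S0x10 i:- U"B"; -> data["i:-S0x10"] = [expr, ...]   (isolated remove)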
def set_interconnect_id(self, interconnect_id, triggers):
'''
Traverses the sequence of combo of identifiers to set the interconnect_id
'''
for sequence in triggers:
for combo in sequence:
for identifier in combo:
identifier.interconnect_id = interconnect_id
def connectid(self, connect_id):
'''
Sets the Data store with a given connect_id
By default, this is 0, but may be set prior to an organization merge
'''
self.connect_id = connect_id
# Update dictionary keys using new connect_id
for key, value in self.data.items():
if value[0].type == 'ScanCode':
# Update connect_id, then regenerate dictionary item
value[0].connect_id = connect_id
new_key = "{0}{1}".format(
value[0].operator,
value[0].unique_keys()[0][0],
)
# Replace dictionary item
self.data[new_key] = self.data.pop(key)
def maxscancode(self):
'''
Find max scancode per connect id
@return: Dictionary of max Scan Codes (keys are the connect id)
'''
max_dict = {}
for key, value in self.data.items():
connect_id = value[0].connect_id
max_uid = value[0].max_trigger_uid()
# Initial value
if connect_id not in max_dict.keys():
max_dict[connect_id] = 0
# Update if necessary
if max_dict[connect_id] < max_uid:
max_dict[connect_id] = max_uid
return max_dict
def merge_lazy_operators(self, debug):
'''
Lazy Set :: is not applied as a Set : until after the merge_in_log has been pruned
Intended to be called during reduction.
'''
# Build dictionary of single ScanCodes first
result_code_lookup = {}
for key, expr in self.data.items():
if expr[0].elems()[0] == 1 and expr[0].triggers[0][0][0].type == 'ScanCode':
result_code_lookup.setdefault(expr[0].result_str(), []).append(key)
# Build list of lazy keys from log
lazy_keys = {}
for key, expr, enabled in reversed(self.merge_in_log):
if key[0:2] == '::' or key[0:3] == 'i::':
if key not in lazy_keys.keys():
# Debug info
if debug:
print("\033[1mLazy\033[0m", key, expr)
# Determine the target key from the expression
target_key = expr.trigger_str()
lazy_keys[target_key] = expr
# Check if we need to do a lazy replacement
if target_key in result_code_lookup.keys():
expr_keys = result_code_lookup[target_key]
for target_expr_key in expr_keys:
# Calculate new key
new_expr = self.data[target_expr_key][0]
new_key = "{0}{1}".format(
new_expr.operator,
new_expr.unique_keys()[0][0]
)
# Determine action based on the new_expr.operator
orig_expr = self.data[new_key][0]
if debug:
print("\t\033[1;32mREPLACE\033[0m {0} -> {1}\n\t{2} => {3}".format(
target_expr_key,
new_key,
expr,
new_expr
))
# Do replacement
self.data[new_key] = [expression.MapExpression(
orig_expr.triggers,
orig_expr.operator,
expr.results
)]
self.data[new_key][0].connect_id = orig_expr.connect_id
# Unset basemap on expression
self.data[new_key][0].base_map = False
def merge(self, merge_in, map_type, debug):
'''
Merge in the given datastructure to this datastructure
This datastructure serves as the base.
Map expressions merge differently than insertions.
+Operators+
: Add/Modify - Replace
:+ Append - Add
:- Remove - Remove
:: Lazy Add/Modify - Replace if found, otherwise drop
i: Add/Modify - Replace
i:+ Append - Add
i:- Remove - Remove
i:: Lazy Add/Modify - Replace if found, otherwise drop
@param merge_in: Data structure from another organization to merge into this one
@param map_type: Used for map specific merges
@param debug: Enable debug output
'''
# Get unique list of ordered keys
# We can't just query the keys directly from the datastructure as we need them in the order they were added
# In addition, we need a unique list of keys, where the most recently added is the most important
cur_keys = []
for key, expr, enabled in reversed(merge_in.merge_in_log):
if key not in cur_keys:
cur_keys.insert(0, key)
# Lazy Set ::
lazy_keys = [key for key in cur_keys if key[0:2] == '::' or key[0:3] == 'i::']
cur_keys = list(set(cur_keys) - set(lazy_keys))
# Append :+
append_keys = [key for key in cur_keys if key[0:2] == ':+' or key[0:3] == 'i:+']
cur_keys = list(set(cur_keys) - set(append_keys))
# Remove :-
remove_keys = [key for key in cur_keys if key[0:2] == ':-' or key[0:3] == 'i:-']
cur_keys = list(set(cur_keys) - set(remove_keys))
# Set :
# Everything left is just a set
set_keys = cur_keys
# First process the :: (or lazy) operators
# We need to read into this datastructure and apply those first
# Otherwise we may get undesired behaviour
for key in lazy_keys:
# Display key:expression being merged in
if debug[0]:
output = merge_in.elem_str(key, True)
print(debug[1] and output or ansi_escape.sub('', output), end="")
# Construct target key
# XXX (HaaTa) We now delay lazy operation application till reduction
#target_key = key[0] == 'i' and "i{0}".format(key[2:]) or key[1:]
target_key = key
# Lazy expressions will be dropped later at reduction
debug_tag = 'mod'
# Debug output
if debug[0]:
output = self.debug_output[debug_tag].format(key)
print(debug[1] and output or ansi_escape.sub('', output))
# Only replace
self.data[target_key] = merge_in.data[key]
# Unset BaseMapContext tag if not a BaseMapContext
if map_type != 'BaseMapContext':
self.data[target_key][0].base_map = False
# Then apply : assignment operators
for key in set_keys:
# Display key:expression being merged in
if debug[0]:
output = merge_in.elem_str(key, True)
print(debug[1] and output or ansi_escape.sub('', output), end="")
# Construct target key
target_key = key
# Indicate if add or modify
if target_key in self.data.keys():
debug_tag = 'mod'
else:
debug_tag = 'add'
# Debug output
if debug[0]:
output = self.debug_output[debug_tag].format(key)
print(debug[1] and output or ansi_escape.sub('', output))
# Set into new datastructure regardless
self.data[target_key] = merge_in.data[key]
# Unset BaseMap flag if this is not a BaseMap merge
if map_type != 'BaseMapContext':
self.data[target_key][0].base_map = False
# Now apply append operations
for key in append_keys:
# Display key:expression being merged in
if debug[0]:
output = merge_in.elem_str(key, True)
print(debug[1] and output or ansi_escape.sub('', output), end="")
# Construct target key
# XXX (HaaTa) Might not be correct, but seems to work with the merge_in_log
#target_key = key[0] == 'i' and "i:{0}".format(key[3:]) or ":{0}".format(key[2:])
target_key = key
# Always appending
debug_tag = 'app'
# Debug output
if debug[0]:
output = self.debug_output[debug_tag].format(key)
print(debug[1] and output or ansi_escape.sub('', output))
# Extend list if it exists
if target_key in self.data.keys():
self.data[target_key].extend(merge_in.data[key])
else:
self.data[target_key] = merge_in.data[key]
# Finally apply removal operations to this datastructure
# If the target removal doesn't exist, ignore silently (show debug message)
for key in remove_keys:
# Display key:expression being merged in
if debug[0]:
output = merge_in.elem_str(key, True)
print(debug[1] and output or ansi_escape.sub('', output), end="")
# Construct target key
# XXX (HaaTa) Might not be correct, but seems to work with the merge_in_log
#target_key = key[0] == 'i' and "i:{0}".format(key[3:]) or ":{0}".format(key[2:])
target_key = key
# Drop right away if target datastructure doesn't have target key
if target_key not in self.data.keys():
debug_tag = 'drp'
# Debug output
if debug[0]:
output = self.debug_output[debug_tag].format(key)
print(debug[1] and output or ansi_escape.sub('', output))
continue
# Compare expressions to be removed with the current set
# Use strings to compare
remove_expressions = ["{0}".format(expr) for expr in merge_in.data[key]]
current_expressions = [("{0}".format(expr), expr) for expr in self.data[target_key]]
for string, expr in current_expressions:
debug_tag = 'drp'
# Check if an expression matches
if string in remove_expressions:
debug_tag = 'rem'
# Debug output
if debug[0]:
output = self.debug_output[debug_tag].format(key)
print(debug[1] and output or ansi_escape.sub('', output))
# Remove if found
if debug_tag == 'rem':
self.data[target_key] = [value for value in self.data[target_key] if value != expr]
# Now append the merge_in_log
self.merge_in_log.extend(merge_in.merge_in_log)
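# Merge-order sketch; keys from the incoming merge_in_log are applied in four
# passes: '::' lazy sets, then ':' sets, then ':+' appends, then ':-' removes.
# An append whose key already exists extends the stored expression list, while
# a remove whose key is missing is silently dropped (tagged 'drp' in debug).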
def cleanup(self, debug=False):
'''
Post-processing step for merges that may need to remove some data in the organization.
Mainly used for dropping BaseMapContext expressions after generating a PartialMapContext.
'''
# Drop any expressions still tagged as BaseMap
# Iterate over a copy so we can modify the dictionary in place
for key, expr in self.data.copy().items():
if expr[0].base_map:
if debug[0]:
output = "\t\033[1;34mDROP\033[0m {0}".format(self.data[key][0])
print(debug[1] and output or ansi_escape.sub('', output))
del self.data[key]
elif debug[0]:
output = "\t\033[1;32mKEEP\033[0m {0}".format(self.data[key][0])
print(debug[1] and output or ansi_escape.sub('', output))
def reduction(self, debug=False):
'''
Simplifies datastructure
Used to replace all trigger HIDCode(USBCode)s with ScanCodes
NOTE: Make sure to create a new MergeContext before calling this as you lose data and prior context
'''
result_code_lookup = {}
# Prune merge_in_log
merge_in_pruned = self.merge_in_log_prune(debug)
# Build dictionary of single ScanCodes first
for key, expr in self.data.items():
if expr[0].elems()[0] == 1 and expr[0].triggers[0][0][0].type == 'ScanCode':
result_code_lookup[expr[0].result_str()] = expr
# Skip if dict is empty
if len(self.data.keys()) == 0:
return
# Instead of using the .data dictionary, use the merge_in_log which maintains the expression application order
# Using this list, replace all the trigger USB codes
for key, log_expr, active in self.merge_in_log:
# Skip if not active
if not active:
continue
# Lookup currently merged expression
if key not in self.data.keys():
continue
expr = self.data[key]
for sub_expr in expr:
# 1) Single USB Codes trigger results will replace the original ScanCode result
if sub_expr.elems()[0] == 1 and sub_expr.triggers[0][0][0].type in ['USBCode', 'SysCode', 'ConsCode']:
# Debug info
if debug:
print("\033[1mSingle\033[0m", key, expr)
# Lookup trigger to see if it exists
trigger_str = sub_expr.trigger_str()
if trigger_str in result_code_lookup.keys():
# Calculate new key
new_expr = result_code_lookup[trigger_str][0]
new_key = "{0}{1}".format(
new_expr.operator,
new_expr.unique_keys()[0][0]
)
# Determine action based on the new_expr.operator
orig_expr = self.data[new_key][0]
# Replace expression
if sub_expr.operator in [':']:
if debug:
print("\t\033[1;32mREPLACE\033[0m {0} -> {1}\n\t{2} => {3}".format(
key,
new_key,
sub_expr,
new_expr
))
# Do replacement
self.data[new_key] = [expression.MapExpression(
orig_expr.triggers,
orig_expr.operator,
sub_expr.results
)]
# Transfer connect_id
self.data[new_key][0].connect_id = orig_expr.connect_id
# Unset basemap on expression
self.data[new_key][0].base_map = False
# Add expression
elif sub_expr.operator in [':+']:
if debug:
print("\t\033[1;42mADD\033[0m {0} -> {1}\n\t{2} => {3}".format(
key,
new_key,
sub_expr,
new_expr
))
# Add expression
self.data[new_key].append(expression.MapExpression(
orig_expr.triggers,
orig_expr.operator,
sub_expr.results
))
# Unset basemap on sub results
for sub_expr in self.data[new_key]:
sub_expr.base_map = False
# Remove expression
elif sub_expr.operator in [':-']:
if debug:
print("\t\033[1;41mREMOVE\033[0m {0} -> {1}\n\t{2} => {3}".format(
key,
new_key,
sub_expr,
new_expr
))
# Remove old key
if key in self.data.keys():
del self.data[key]
# Otherwise drop HID expression
else:
if debug:
print("\t\033[1;34mDROP\033[0m")
if key in self.data.keys():
del self.data[key]
# 2) Complex triggers are processed to replace out any USB Codes with Scan Codes
elif sub_expr.elems()[0] > 1:
# Debug info
if debug:
print("\033[1;4mMulti\033[0m ", key, expr)
# Lookup each trigger element and replace
# If any trigger element doesn't exist, drop expression
# Dive through sequence->combo->identifier (sequence of combos of ids)
replace = False
drop = False
for seq_in, sequence in enumerate(sub_expr.triggers):
for com_in, combo in enumerate(sequence):
for ident_in, identifier in enumerate(combo):
ident_str = "({0})".format(identifier)
# Replace identifier
if ident_str in result_code_lookup.keys():
match_expr = result_code_lookup[ident_str]
sub_expr.triggers[seq_in][com_in][ident_in] = match_expr[0].triggers[0][0][0]
replace = True
# Ignore non-USB triggers
elif identifier.type in ['IndCode', 'GenericTrigger', 'Layer', 'LayerLock', 'LayerShift', 'LayerLatch', 'ScanCode']:
pass
# Drop everything else
else:
drop = True
# Trigger Identifier was replaced
if replace:
if debug:
print("\t\033[1;32mREPLACE\033[0m", expr)
# Trigger Identifier failed (may still occur if there was a replacement)
if drop:
if debug:
print("\t\033[1;34mDROP\033[0m")
del self.data[key]
# Finally we can merge in the Lazy :: Set operators
self.merge_lazy_operators(debug)
# Show results of reduction
if debug:
print(self)
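# Reduction sketch (hypothetical KLL); a USB-code trigger is resolved back to
# the ScanCode that produces it, using result_code_lookup:
#   S0x10 : U"A";   (BaseMap)
#   U"A"  : U"B";   (later layer)
# after reduction the ScanCode mapping is rewritten to the equivalent of
#   S0x10 : U"B";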
class AnimationData(Data):
'''
KLL datastructure for Animation configuration
Animation -> modifiers
'''
class AnimationFrameData(Data):
'''
KLL datastructure for Animation Frame configuration
Animation -> Pixel Settings
'''
class CapabilityData(Data):
'''
KLL datastructure for Capability mapping
Capability -> C Function/Identifier
'''
class DefineData(Data):
'''
KLL datastructure for Define mapping
Variable -> C Define/Identifier
'''
class PixelChannelData(Data):
'''
KLL datastructure for Pixel Channel mapping
Pixel -> Channels
'''
def maxpixelid(self):
'''
Find max pixel id per connect id
@return: dictionary of connect id to max pixel id
'''
max_pixel = {}
for key, value in self.data.items():
connect_id = value.connect_id
# Make sure this is a PixelAddressId
if isinstance(value.pixel.uid, PixelAddressId):
max_uid = value.pixel.uid.index
else:
max_uid = value.pixel.uid
# Initial value
if connect_id not in max_pixel.keys():
max_pixel[connect_id] = 0
# Update if necessary
if max_pixel[connect_id] < max_uid:
max_pixel[connect_id] = max_uid
# TODO REMOVEME
#print( key,value, value.__class__, value.pixel.uid.index, value.connect_id )
return max_pixel
class PixelPositionData(Data):
'''
KLL datastructure for Pixel Position mapping
Pixel -> Physical Location
'''
def add_expression(self, expression, debug):
'''
Add expression to data structure
May have multiple keys to add for a given expression
@param expression: KLL Expression (fully tokenized and parsed)
@param debug: Enable debug output
'''
# Lookup unique keys for expression
keys = expression.unique_keys()
# Add/Modify expressions in datastructure
for key, uniq_expr in keys:
# Check which operation we are trying to do, add or modify
if debug[0]:
if key in self.data.keys():
output = self.debug_output['mod'].format(key)
else:
output = self.debug_output['add'].format(key)
print(debug[1] and output or ansi_escape.sub('', output))
# If key already exists, just update
if key in self.data.keys():
self.data[key].update(uniq_expr)
else:
self.data[key] = uniq_expr
# Append to log
self.merge_in_log_expression(key, uniq_expr, debug)
class ScanCodePositionData(Data):
'''
KLL datastructure for ScanCode Position mapping
ScanCode -> Physical Location
'''
def add_expression(self, expression, debug):
'''
Add expression to data structure
May have multiple keys to add for a given expression
@param expression: KLL Expression (fully tokenized and parsed)
@param debug: Enable debug output
'''
# Lookup unique keys for expression
keys = expression.unique_keys()
# Add/Modify expressions in datastructure
for key, uniq_expr in keys:
# Check which operation we are trying to do, add or modify
if debug[0]:
if key in self.data.keys():
output = self.debug_output['mod'].format(key)
else:
output = self.debug_output['add'].format(key)
print(debug[1] and output or ansi_escape.sub('', output))
# If key already exists, just update
if key in self.data.keys():
self.data[key].update(uniq_expr)
else:
self.data[key] = uniq_expr
# Append to log
self.merge_in_log_expression(key, uniq_expr, debug)
class VariableData(Data):
'''
KLL datastructure for Variables and Arrays
Variable -> Data
Array -> Data
'''
def add_expression(self, expression, debug):
'''
Add expression to data structure
May have multiple keys to add for a given expression
In the case of indexed variables, only replace the specified index
@param expression: KLL Expression (fully tokenized and parsed)
@param debug: Enable debug output
'''
# Lookup unique keys for expression
keys = expression.unique_keys()
# Add/Modify expressions in datastructure
for key, uniq_expr in keys:
# Check which operation we are trying to do, add or modify
if debug[0]:
if key in self.data.keys():
output = self.debug_output['mod'].format(key)
else:
output = self.debug_output['add'].format(key)
print(debug[1] and output or ansi_escape.sub('', output))
# Check to see if we need to cap-off the array (a position parameter is given)
if uniq_expr.type == 'Array' and uniq_expr.pos is not None:
# Modify existing array
if key in self.data.keys():
self.data[key].merge_array(uniq_expr)
# Add new array
else:
uniq_expr.merge_array()
self.data[key] = uniq_expr
# Otherwise just add/replace expression
else:
self.data[key] = uniq_expr
# Append to log
self.merge_in_log_expression(key, uniq_expr, debug)
class Organization:
'''
Container class for KLL datastructures
The purpose of these datastructures is to store expressions symbolically at first, then slowly solve/deduplicate them.
Since the order in which the merges occurs matters, this involves a number of intermediate steps.
'''
def __init__(self, parent):
'''
Initialize data structure
'''
self.parent = parent
# Setup each of the internal sub-datastructures
self.animation_data = AnimationData(self)
self.animation_frame_data = AnimationFrameData(self)
self.capability_data = CapabilityData(self)
self.define_data = DefineData(self)
self.mapping_data = MappingData(self)
self.pixel_channel_data = PixelChannelData(self)
self.pixel_position_data = PixelPositionData(self)
self.scan_code_position_data = ScanCodePositionData(self)
self.variable_data = VariableData(self)
# Expression to Datastructure mapping
self.data_mapping = {
'AssignmentExpression': {
'Array': self.variable_data,
'Variable': self.variable_data,
},
'DataAssociationExpression': {
'Animation': self.animation_data,
'AnimationFrame': self.animation_frame_data,
'PixelPosition': self.pixel_position_data,
'ScanCodePosition': self.scan_code_position_data,
},
'MapExpression': {
'TriggerCode': self.mapping_data,
'PixelChannel': self.pixel_channel_data,
},
'NameAssociationExpression': {
'Capability': self.capability_data,
'Define': self.define_data,
},
}
def __copy__(self):
'''
On organization copy, return a safe object
Attempts to only copy the datastructures that may need to diverge
'''
new_obj = Organization(self.parent)
# Copy only .data from each organization
new_obj.animation_data.data = copy.copy(self.animation_data.data)
new_obj.animation_frame_data.data = copy.copy(self.animation_frame_data.data)
new_obj.capability_data.data = copy.copy(self.capability_data.data)
new_obj.define_data.data = copy.copy(self.define_data.data)
new_obj.mapping_data.data = copy.copy(self.mapping_data.data)
new_obj.pixel_channel_data.data = copy.copy(self.pixel_channel_data.data)
new_obj.pixel_position_data.data = copy.copy(self.pixel_position_data.data)
new_obj.scan_code_position_data.data = copy.copy(self.scan_code_position_data.data)
new_obj.variable_data.data = copy.copy(self.variable_data.data)
# Also copy merge_in_log
new_obj.animation_data.merge_in_log = copy.copy(self.animation_data.merge_in_log)
new_obj.animation_frame_data.merge_in_log = copy.copy(self.animation_frame_data.merge_in_log)
new_obj.capability_data.merge_in_log = copy.copy(self.capability_data.merge_in_log)
new_obj.define_data.merge_in_log = copy.copy(self.define_data.merge_in_log)
new_obj.mapping_data.merge_in_log = copy.copy(self.mapping_data.merge_in_log)
new_obj.pixel_channel_data.merge_in_log = copy.copy(self.pixel_channel_data.merge_in_log)
new_obj.pixel_position_data.merge_in_log = copy.copy(self.pixel_position_data.merge_in_log)
new_obj.scan_code_position_data.merge_in_log = copy.copy(self.scan_code_position_data.merge_in_log)
new_obj.variable_data.merge_in_log = copy.copy(self.variable_data.merge_in_log)
return new_obj
def stores(self):
'''
Returns list of sub-datastructures
'''
return [
self.animation_data,
self.animation_frame_data,
self.capability_data,
self.define_data,
self.mapping_data,
self.pixel_channel_data,
self.pixel_position_data,
self.scan_code_position_data,
self.variable_data,
]
def add_expression(self, expression, debug):
'''
Add expression to datastructure
Will automatically determine which type of expression and place in the relevant store
@param expression: KLL Expression (fully tokenized and parsed)
@param debug: Enable debug output
'''
# Determine type of Expression
expression_type = expression.__class__.__name__
# Determine Expression Subtype
expression_subtype = expression.type
# Locate datastructure
data = self.data_mapping[expression_type][expression_subtype]
# Debug output
if debug[0]:
output = "\t\033[4m{0}\033[0m".format(data.__class__.__name__)
print(debug[1] and output or ansi_escape.sub('', output))
# Add expression to determined datastructure
data.add_expression(expression, debug)
def merge(self, merge_in, map_type, debug):
'''
Merge in the given organization to this organization
This organization serves as the base.
@param merge_in: Organization to merge into this one
@param map_type: Used for map specific merges
@param debug: Enable debug output
'''
# Merge each of the sub-datastructures
for this, that in zip(self.stores(), merge_in.stores()):
this.merge(that, map_type, debug)
def cleanup(self, debug=False):
'''
Post-processing step for merges that may need to remove some data in the organization.
Mainly used for dropping BaseMapContext expressions after generating a PartialMapContext.
'''
for store in self.stores():
store.cleanup(debug)
def reduction(self, debug=False):
'''
Simplifies datastructure
NOTE: This will remove data, therefore, context is lost
'''
for store in self.stores():
store.reduction(debug)
def maxscancode(self):
'''
Find max scancode per connect id
@return: dictionary of connect id to max scancode
'''
return self.mapping_data.maxscancode()
def maxpixelid(self):
'''
Find max pixel id per connect id
@return: dictionary of connect id to max pixel id
'''
return self.pixel_channel_data.maxpixelid()
def __repr__(self):
return "{0}".format(self.stores())
kll/common/parse.py
#!/usr/bin/env python3
'''
KLL Parsing Expressions
This file contains various parsing rules and processors used by funcparserlib for KLL
REMEMBER: When editing parser BNF-like expressions, order matters. Specifically lexer tokens and parser |
'''
# Parser doesn't play nice with linters, disable some checks
# pylint: disable=no-self-argument, too-many-public-methods, no-self-use, bad-builtin
# Copyright (C) 2016-2018 by Jacob Alexander
#
# This file is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This file is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this file. If not, see <http://www.gnu.org/licenses/>.
### Imports ###
from kll.common.id import (
AnimationId, AnimationFrameId,
CapArgId, CapArgValue, CapId,
HIDId,
LayerId,
NoneId,
PixelAddressId, PixelId, PixelLayerId,
ScanCodeId,
TriggerId
)
from kll.common.modifier import AnimationModifierList
from kll.common.schedule import AnalogScheduleParam, ScheduleParam, Time
from kll.extern.funcparserlib.lexer import Token
from kll.extern.funcparserlib.parser import (some, a, many, oneplus, skip, maybe)
### Decorators ###
# Print Decorator Variables
ERROR = '\033[5;1;31mERROR\033[0m:'
WARNING = '\033[5;1;33mWARNING\033[0m:'
### Classes ###
# Parsing Functions
class Make:
'''
Collection of parse string interpreters
'''
def scanCode(token):
'''
Converts a raw scan code string into an ScanCodeId /w integer
S0x10 -> 16
'''
if isinstance(token, int):
return ScanCodeId(token)
else:
return ScanCodeId(int(token[1:], 0))
def hidCode(type, token):
'''
Convert a given raw hid token string to an integer /w a type
U"Enter" -> USB, Enter(0x28)
'''
# If already converted to a HIDId, just return
if isinstance(token, HIDId):
return token
# If first character is a U or I, strip
token_val = token.value
if token_val[0] == "U" or token_val[0] == "I":
token_val = token_val[1:]
# CONS specifier
elif 'CONS' in token_val:
token_val = token_val[4:]
# SYS specifier
elif 'SYS' in token_val:
token_val = token_val[3:]
# Determine locale
locale = token.locale
# Determine lookup dictionary
lookup = None
if type == 'USBCode':
lookup = locale.dict('from_hid_keyboard', key_caps=True)
elif type == 'SysCode':
lookup = locale.dict('from_hid_sysctrl', key_caps=True)
elif type == 'ConsCode':
lookup = locale.dict('from_hid_consumer', key_caps=True)
elif type == 'IndCode':
lookup = locale.dict('from_hid_led', key_caps=True)
# If using string representation of USB Code, do lookup, case-insensitive
if '"' in token_val:
try:
match_name = token_val[1:-1].upper()
hid_code = int(lookup[match_name], 0)
except LookupError as err:
print("{} {} ({}) is an invalid USB HID Code Lookup...".format(
ERROR,
err,
locale
))
raise
else:
# Already tokenized
if (
type == 'USBCode' and token_val[0] == 'USB'
or
type == 'SysCode' and token_val[0] == 'SYS'
or
type == 'ConsCode' and token_val[0] == 'CONS'
or
type == 'IndCode' and token_val[0] == 'IND'
):
hid_code = token_val[1]
# Convert
else:
hid_code = int(token_val, 0)
return HIDId(type, hid_code, locale)
def usbCode(token):
'''
Convert a given raw USB Keyboard hid token string to an integer /w a type
U"Enter" -> USB, Enter(0x28)
'''
return Make.hidCode('USBCode', token)
def consCode(token):
'''
Convert a given raw Consumer Control hid token string to an integer /w a type
'''
return Make.hidCode('ConsCode', token)
def sysCode(token):
'''
Convert a given raw System Control hid token string to an integer /w a type
'''
return Make.hidCode('SysCode', token)
def indCode(token):
'''
Convert a given raw Indicator hid token string to an integer /w a type
'''
return Make.hidCode('IndCode', token)
def animation(name):
'''
Converts a raw animation value into an AnimationId /w name
A"myname" -> myname
'''
if name[0] == "A":
return AnimationId(name[2:-1])
else:
return AnimationId(name)
def animationTrigger(animation, specifier):
'''
Generate an AnimationId
'''
trigger_list = []
# AnimationId
trigger_list.append(AnimationId(animation))
return trigger_list, specifier
def animationAssociation(animation, frame_identifier):
'''
Generate an AnimationFrameId
'''
trigger_list = []
# AnimationFrameId
for index in frame_identifier:
trigger_list.append([[AnimationFrameId(animation, index)]])
return trigger_list
def animationCapability(animation, modifiers):
'''
Apply modifiers to AnimationId
'''
if modifiers is not None:
animation.setModifiers(modifiers)
return [animation]
def animationModlist(modifiers):
'''
Build an AnimationModifierList
Only used for animation data association
'''
modlist = AnimationModifierList()
modlist.setModifiers(modifiers)
return modlist
def pixelCapability(pixels, modifiers):
'''
Apply modifiers to list of pixels/pixellists
Results in a combination of pixel capabilities
'''
pixelcap_list = []
for pixel in pixels:
pixel.setModifiers(modifiers)
pixelcap_list.append(pixel)
return pixelcap_list
def pixel(token):
'''
Converts a raw pixel value into a PixelId /w integer
P0x3 -> 3
'''
if isinstance(token, int):
return PixelId(token)
else:
return PixelId(int(token[1:], 0))
def pixel_list(pixel_list):
'''
Converts a list of numbers into a list of PixelIds
'''
pixels = []
for pixel in pixel_list:
pixels.append(PixelId(pixel))
return pixels
def pixelLayer(token):
'''
Converts a raw pixel layer value into a PixelLayerId /w integer
PL0x3 -> 3
'''
if isinstance(token, int):
return PixelLayerId(token)
else:
return PixelLayerId(int(token[2:], 0))
def pixelLayer_list(layer_list):
'''
Converts a list of numbers into a list of PixelLayerIds
'''
layers = []
for layer in layer_list:
layers.append(PixelLayerId(layer))
return layers
def pixelchan(pixel_list, chans):
'''
Apply channels to PixelId
Only one pixel at a time can be mapped, hence pixel_list[0]
'''
pixel = pixel_list[0]
pixel.setChannels(chans)
return pixel
def pixelmod(pixels, modifiers):
'''
Apply modifiers to list of pixels/pixellists
Results in a combination of pixel capabilities
'''
pixelcap_list = []
for pixel in pixels:
# Convert HIDIds into PixelIds
if isinstance(pixel, HIDId) or isinstance(pixel, ScanCodeId):
pixel = PixelId(pixel)
pixel.setModifiers(modifiers)
pixelcap_list.append(pixel)
return pixelcap_list
def pixel_address(elems):
'''
Parse pixel positioning for row/column addressing
@param elems: index list or (operator, value)
40
c:0
c:30%
r:i+30
'''
pixel_address_list = []
# Index list
if isinstance(elems, list):
# List of integers, possibly a range
if isinstance(elems[0], int):
for elem in elems:
pixel_address_list.append(PixelAddressId(index=elem))
# Already ready to append
elif isinstance(elems[0], PixelId):
pixel_address_list.append(elems[0])
# No value
elif isinstance(elems, Token):
# Row
if "r:i" in elems.name:
pixel_address_list.append(PixelAddressId(relRow=0))
# Column
if "c:i" in elems.name:
pixel_address_list.append(PixelAddressId(relCol=0))
# Operator with value
elif isinstance(elems[0], Token):
# Prepare address value
value = elems[1]
# Positioning
if elems[0].type == "ColRowOperator":
# Row
if elems[0].name == "r:":
pixel_address_list.append(PixelAddressId(row=value))
# Column
if elems[0].name == "c:":
pixel_address_list.append(PixelAddressId(col=value))
# Relative Positioning
elif elems[0].type == "RelCROperator":
if '-' in elems[0].name:
value *= -1
# Row
if "r:i" in elems[0].name:
pixel_address_list.append(PixelAddressId(relRow=value))
# Column
if "c:i" in elems[0].name:
pixel_address_list.append(PixelAddressId(relCol=value))
return pixel_address_list
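# Parsing sketch; each accepted address form maps onto PixelAddressId fields
# (assuming percent values have already been converted to floats by Make.percent):
#   40      -> PixelAddressId(index=40)
#   c:0     -> PixelAddressId(col=0)
#   c:30%   -> PixelAddressId(col=0.3)
#   r:i+30  -> PixelAddressId(relRow=30)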
def pixel_address_merge(elems):
'''
Merge pixel addresses together
'''
# Merge is only necessary if there is more than one element
if len(elems) > 1:
for elem in elems[1:]:
elems[0].merge(elem)
return [elems[0]]
def position(token):
'''
Physical position split
x:20 -> (x, 20)
'''
return token.split(':')
def usbCode_number(token):
'''
USB Keyboard HID Code lookup
'''
return HIDId('USBCode', token.value, token.locale)
def consCode_number(token):
'''
Consumer Control HID Code lookup
'''
return HIDId('ConsCode', token.value, token.locale)
def sysCode_number(token):
'''
System Control HID Code lookup
'''
return HIDId('SysCode', token.value, token.locale)
def indCode_number(token):
'''
Indicator HID Code lookup
'''
return HIDId('IndCode', token.value, token.locale)
def none(token):
'''
Replace key-word with NoneId specifier (which indicates a noneOut capability)
'''
return [[[NoneId()]]]
def seqString(token, spec='lspec'):
'''
Converts sequence string to a sequence of combinations
'Ab' -> U"Shift" + U"A", U"B"
'abb' -> U"A", U"B", U"NoEvent", U"B"
@param spec: 'lspec' or 'rspec'
'''
# Determine locale
locale = token.locale
# Compose string using set locale
sequence = None
if spec == 'lspec':
sequence = locale.compose(token.value[1:-1], minimal_clears=True, no_clears=True)
else:
sequence = locale.compose(token.value[1:-1], minimal_clears=True)
# Convert each element in sequence of combos to HIDIds
hid_ids = []
for combo in sequence:
new_combo = []
for elem in combo:
# Lookup uid (usb code) from alias name (used in sequence)
new_elem = HIDId('USBCode', int(locale.json()['from_hid_keyboard'][elem], 0), locale)
new_combo.append(new_elem)
hid_ids.append(new_combo)
return hid_ids
def seqStringL(token):
'''
Converts sequence string to a sequence of combinations
lspec side
'Ab' -> U"Shift" + U"A", U"B"
'abb' -> U"A", U"B", U"NoEvent", U"B"
'''
return Make.seqString(token, 'lspec')
def seqStringR(token):
'''
Converts sequence string to a sequence of combinations
rspec side
'Ab' -> U"Shift" + U"A", U"B"
'abb' -> U"A", U"B", U"NoEvent", U"B"
'''
return Make.seqString(token, 'rspec')
def string(token):
'''
Converts a raw string to a Python string
"this string" -> this string
'''
return token[1:-1]
def unseqString(token):
'''
Converts a raw sequence string to a Python string
'this string' -> this string
'''
return token[1:-1]
def number(token):
'''
Convert string number to Python integer
'''
return int(token, 0)
def neg_number(dash, number):
'''
If a dash is provided, then the number is negative
'''
if dash is not None:
number = number * -1
return number
def numberToken(token):
'''
Convert token value to Python integer
'''
try:
token.value = int(token.value, 0)
except TypeError:
pass
return token
def percent(token):
'''
Convert string percent to Python float
'''
return int(token[:-1], 0) / 100.0
def timing(token):
'''
Convert raw timing parameter to integer time and determine units
1ms -> 1, ms
'''
# Find ms, us, or s
if 'ms' in token:
unit = 'ms'
num = token.split('m')[0]
elif 'us' in token:
unit = 'us'
num = token.split('u')[0]
elif 'ns' in token:
unit = 'ns'
num = token.split('n')[0]
elif 's' in token:
unit = 's'
num = token.split('s')[0]
else:
print("{0} cannot find timing unit in token '{1}'".format(ERROR, token))
return Time(float(num), unit)
def specifierTiming(timing):
'''
When only timing is given, infer state at a later stage from the context of the mapping
'''
return ScheduleParam(None, timing)
def specifierState(state, timing=None):
'''
Generate a Schedule Parameter
Automatically mutates itself into the correct object type
'''
return ScheduleParam(state, timing)
def specifierAnalog(value):
'''
Generate an Analog Schedule Parameter
'''
return AnalogScheduleParam(value)
def specifierUnroll(identifier, schedule_params):
'''
Unroll specifiers into the trigger/result identifier
First, combine all Schedule Parameters into a Schedule
Then attach Schedule to the identifier
If the identifier is a list, then iterate through them
and apply the schedule to each
'''
# Check if this is a list of identifiers
if isinstance(identifier, list):
for ident in identifier:
ident.setSchedule(schedule_params)
return identifier
else:
identifier.setSchedule(schedule_params)
return [identifier]
def layerTypeIdent(layer_type, inner_list, specifier):
'''
Given a layer expression, determine which kind of layer expression it is
Layer
LayerShift
LayerLatch
LayerLock
'''
# Determine layer type (remove [)
layer_type = layer_type[:-1]
# Add layer type to each given layer
identifier_list = []
for layer in inner_list:
identifier_list.append(LayerId(layer_type, layer))
return identifier_list, specifier
def genericTriggerIdent(identifier, code, specifier):
'''
Given a generic trigger, create a TriggerId object
Generic Triggers don't support ranges
'''
trigger_obj = TriggerId(identifier, code)
return trigger_obj, specifier
# Range can go from high to low or low to high
def scanCode_range(rangeVals):
'''
Scan Code range expansion
S[0x10-0x12] -> S0x10, S0x11, S0x12
'''
start = rangeVals[0]
end = rangeVals[1]
# Swap start, end if start is greater than end
if start > end:
start, end = end, start
# Iterate from start to end, and generate the range
values = list(range(start, end + 1))
# Generate ScanCodeIds
return [ScanCodeId(v) for v in values]
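# Illustrative expansion for the function above: scanCode_range([0x12, 0x10])
# swaps the endpoints and yields ScanCodeId objects for 0x10, 0x11 and 0x12,
# so S[0x12-0x10] expands the same way as S[0x10-0x12].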
# Range can go from high to low or low to high
# Warn on 0-9 for USBCodes (as this does not do what one would expect) TODO
# Lookup USB HID tags and convert to a number
def hidCode_range(type, rangeVals):
'''
HID Code range expansion
U["A"-"C"] -> U"A", U"B", U"C"
'''
# Check if already integers
if isinstance(rangeVals[0], int):
start = rangeVals[0]
else:
start = Make.hidCode(type, rangeVals[0]).uid
if isinstance(rangeVals[1], int):
end = rangeVals[1]
else:
end = Make.hidCode(type, rangeVals[1]).uid
# Swap start, end if start is greater than end
if start > end:
start, end = end, start
# Iterate from start to end, and generate the range
listRange = list(range(start, end + 1))
# Determine locale
locale = rangeVals[0].locale
# Convert each item in the list to a tuple
for item in range(len(listRange)):
listRange[item] = HIDId(type, listRange[item], locale)
return listRange
def usbCode_range(rangeVals):
'''
USB Keyboard HID Code range expansion
'''
return Make.hidCode_range('USBCode', rangeVals)
def sysCode_range(rangeVals):
'''
System Control HID Code range expansion
'''
return Make.hidCode_range('SysCode', rangeVals)
def consCode_range(rangeVals):
'''
Consumer Control HID Code range expansion
'''
return Make.hidCode_range('ConsCode', rangeVals)
def indCode_range(rangeVals):
'''
Indicator HID Code range expansion
'''
return Make.hidCode_range('IndCode', rangeVals)
def range(start, end):
'''
Converts the start and end points of a range to a list of numbers
Can go low to high or high to low
'''
# High to low
if end < start:
return list(range(end, start + 1))
# Low to high
return list(range(start, end + 1))
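# Usage sketch (assumes the enclosing Make class; the builtin range() is what is
# called inside the method body, so the name collision is harmless):
#
#   >>> Make.range(2, 5)
#   [2, 3, 4, 5]
#   >>> Make.range(5, 2)
#   [2, 3, 4, 5]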
def capArg(argument, width=None):
'''
Converts a capability argument:width to a CapArgId
If no width is specified, it is ignored
'''
return CapArgId(argument, width)
def capArgValue(tuple_value):
'''
Converts a capability argument value to a CapArgValue
'''
sign, value = tuple_value
if sign is not None:
value *= -1
return CapArgValue(value)
def capUsage(name, arguments):
'''
Converts a capability tuple, argument list to a CapId Usage
'''
return CapId(name, 'Capability', arguments)
def debug(tokens):
'''
Just prints tokens
Used for debugging
'''
print(tokens)
return tokens
### Rules ###
# Base Rules
def const(x): return lambda _: x
def unarg(f): return lambda x: f(*x)
def flatten(list): return sum(list, [])
def tokenValue(x):
'''
Return string value of a token
@param x: Token
@returns: String value of token
'''
return x.value
def tokenType(t):
'''
Returns a parser that matches the given token type and yields its string value
@param t: Name of token type
@returns: Parser yielding the matched token's string value
'''
return some(lambda x: x.type == t) >> tokenValue
def tokenTypeOnly(t):
'''
Returns a parser that matches the given token type and yields the full Token object
@param t: Name of token type
@return: Parser yielding the matched Token
'''
return some(lambda x: x.type == t)
def operator(s): return a(Token('Operator', s)) >> tokenValue
def parenthesis(s): return a(Token('Parenthesis', s)) >> tokenValue
def bracket(s): return a(Token('Bracket', s)) >> tokenValue
eol = a(Token('EndOfLine', ';'))
def maybeFlatten(items):
'''
Iterate through top-level lists
Flatten, only if the element is also a list
[[1,2],3,[[4,5]]] -> [1,2,3,[4,5]]
'''
new_list = []
for elem in items:
# Flatten only if a list
if isinstance(elem, list):
new_list.extend(elem)
else:
new_list.append(elem)
return new_list
def listElem(item):
'''
Convert to a list element
'''
return [item]
def listToTuple(items):
'''
Convert list to a tuple
'''
return tuple(items)
def oneLayerFlatten(items):
'''
Flatten only the top layer (list of lists of ...)
'''
mainList = []
for sublist in items:
for item in sublist:
mainList.append(item)
return mainList
def optionCompression(sequence):
'''
Adds another dimension to a list of lists.
This is the inverse operation of optionExpansion, iff there were no expanded ranges
@param: sequence: Sequence of combos
@returns: Sequence of combos of ranges
'''
new_list = []
for combo in sequence:
new_combo = []
for elem in combo:
new_combo.append([elem])
new_list.append(new_combo)
return new_list
def optionExpansion(sequences):
'''
Expand ranges of values in the 3rd dimension of the list, to a list of 2nd-dimension lists
i.e. [ sequence, [ combo, [ range ] ] ] --> [ [ sequence, [ combo ] ], ... ] (one expanded entry per option)
@param sequences: Sequence of combos of ranges
@returns: List of sequences of combos
'''
expandedSequences = []
# Total number of combinations of the sequence of combos that needs to be generated
totalCombinations = 1
# List of leaf lists, with number of leaves
maxLeafList = []
# Traverse to the leaf nodes, and count the items in each leaf list
for sequence in sequences:
for combo in sequence:
rangeLen = len(combo)
totalCombinations *= rangeLen
maxLeafList.append(rangeLen)
# Counter list to keep track of which combination is being generated
curLeafList = [0] * len(maxLeafList)
# Generate a list of permutations of the sequence of combos
for count in range(0, totalCombinations):
expandedSequences.append([]) # Prepare list for adding the new combination
pos = 0
# Traverse sequence of combos to generate permutation
for sequence in sequences:
expandedSequences[-1].append([])
for combo in sequence:
expandedSequences[-1][-1].append(combo[curLeafList[pos]])
pos += 1
# Increment combination tracker
for leaf in range(0, len(curLeafList)):
curLeafList[leaf] += 1
# Reset this position, increment next position (if it exists), then stop
if curLeafList[leaf] >= maxLeafList[leaf]:
curLeafList[leaf] = 0
if leaf + 1 < len(curLeafList):
curLeafList[leaf + 1] += 1
return expandedSequences
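# Worked example of optionExpansion (a sketch using plain integers in place of
# the identifier objects normally held in the ranges): one sequence with one
# combo, where the first range has two options, expands into two sequences:
#
#   >>> optionExpansion([[[1, 2], [3]]])
#   [[[1, 3]], [[2, 3]]]
#
# totalCombinations is 2 (2 * 1), so two permutations are generated by cycling
# the per-range counters in curLeafList.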
def listit(t):
'''
Convert tuple of tuples to list of lists
'''
return list(map(listit, t)) if isinstance(t, (list, tuple)) else t
def tupleit(t):
'''
Convert list of lists to tuple of tuples
'''
return tuple(map(tupleit, t)) if isinstance(t, (tuple, list)) else t
# Sub Rules
usbCode = tokenTypeOnly('USBCode') >> Make.usbCode
scanCode = tokenType('ScanCode') >> Make.scanCode
consCode = tokenTypeOnly('ConsCode') >> Make.consCode
sysCode = tokenTypeOnly('SysCode') >> Make.sysCode
indCode = tokenTypeOnly('IndCode') >> Make.indCode
animation = tokenType('Animation') >> Make.animation
pixel = tokenType('Pixel') >> Make.pixel
pixelLayer = tokenType('PixelLayer') >> Make.pixelLayer
none = tokenType('None') >> Make.none
position = tokenType('Position') >> Make.position
comma = tokenType('Comma')
content = tokenType('VariableContents')
dash = tokenType('Dash')
name = tokenType('Name')
number = tokenType('Number') >> Make.number
neg_number = maybe(dash) + number >> unarg(Make.neg_number)
numberToken = tokenTypeOnly('Number') >> Make.numberToken
percent = tokenType('Percent') >> Make.percent
plus = tokenType('Plus')
timing = tokenType('Timing') >> Make.timing
string = tokenType('String') >> Make.string
unString = tokenTypeOnly('String') # When the double quotes are still needed for internal processing
seqStringL = tokenTypeOnly('SequenceStringL') >> Make.seqStringL >> optionCompression # lspec
seqStringR = tokenTypeOnly('SequenceStringR') >> Make.seqStringR >> optionCompression # rspec
unseqString = tokenType('SequenceString') >> Make.unseqString # For use with variables
def colRowOperator(s): return a(Token('ColRowOperator', s))
def relCROperator(s): return a(Token('RelCROperator', s))
pixelOperator = tokenType('PixelOperator')
# Code variants
code_begin = tokenType('CodeBegin')
code_end = tokenType('CodeEnd')
# Specifier
specifier_basic = (timing >> Make.specifierTiming) | (name >> Make.specifierState)
specifier_complex = (name + skip(operator(':')) + timing) >> unarg(Make.specifierState)
specifier_state = specifier_complex | specifier_basic
specifier_analog = number >> Make.specifierAnalog
specifier_list = skip(parenthesis('(')) + many((specifier_state | specifier_analog) + skip(maybe(comma))) + skip(parenthesis(')'))
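# Examples of specifier lists the rules above accept (illustrative):
#   (P)            - single state specifier
#   (P:100ms, R)   - state with timing, plus a second state
#   (20)           - analog specifier (percentage pressed)
#   (100ms)        - bare timing, state inferred at a later stage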
# Scan Codes
scanCode_start = tokenType('ScanCodeStart')
scanCode_range = number + skip(dash) + number >> Make.scanCode_range
scanCode_listElem = number >> Make.scanCode
scanCode_specifier = (scanCode_range | scanCode_listElem) + maybe(specifier_list) >> unarg(Make.specifierUnroll)
scanCode_innerList = many(scanCode_specifier + skip(maybe(comma))) >> flatten
scanCode_expanded = skip(scanCode_start) + scanCode_innerList + skip(code_end) + maybe(specifier_list) >> unarg(Make.specifierUnroll)
scanCode_elem = scanCode + maybe(specifier_list) >> unarg(Make.specifierUnroll)
scanCode_combo_elem = scanCode_expanded | scanCode_elem
scanCode_single = (skip(scanCode_start) + scanCode_listElem + skip(code_end)) | scanCode
scanCode_il_nospec = oneplus((scanCode_range | scanCode_listElem) + skip(maybe(comma)))
scanCode_nospecifier = skip(scanCode_start) + scanCode_il_nospec + skip(code_end)
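# Examples of scan code trigger forms covered by the rules above (illustrative):
#   S0x10                - single scan code (scanCode_elem)
#   S[0x10-0x12]         - range, expanded to S0x10, S0x11, S0x12 (scanCode_range)
#   S[0x10, 0x20-0x22]   - mixed list inside brackets (scanCode_innerList)
#   S0x10(P:100ms)       - scan code with a state/timing specifier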
# Cons Codes
consCode_start = tokenType('ConsCodeStart')
consCode_number = numberToken >> Make.consCode_number
consCode_range = (consCode_number | unString) + skip(dash) + (number | unString) >> Make.consCode_range
consCode_listElemTag = unString >> Make.consCode
consCode_listElem = (consCode_number | consCode_listElemTag)
consCode_specifier = (consCode_range | consCode_listElem) + maybe(specifier_list) >> unarg(Make.specifierUnroll)
consCode_innerList = oneplus(consCode_specifier + skip(maybe(comma))) >> flatten
consCode_expanded = skip(consCode_start) + consCode_innerList + skip(code_end) + maybe(specifier_list) >> unarg(Make.specifierUnroll)
consCode_elem = consCode + maybe(specifier_list) >> unarg(Make.specifierUnroll)
consCode_il_nospec = oneplus((consCode_range | consCode_listElem) + skip(maybe(comma)))
consCode_nospecifier = skip(consCode_start) + consCode_il_nospec + skip(code_end)
# Sys Codes
sysCode_start = tokenType('SysCodeStart')
sysCode_number = numberToken >> Make.sysCode_number
sysCode_range = (sysCode_number | unString) + skip(dash) + (number | unString) >> Make.sysCode_range
sysCode_listElemTag = unString >> Make.sysCode
sysCode_listElem = (sysCode_number | sysCode_listElemTag)
sysCode_specifier = (sysCode_range | sysCode_listElem) + maybe(specifier_list) >> unarg(Make.specifierUnroll)
sysCode_innerList = oneplus(sysCode_specifier + skip(maybe(comma))) >> flatten
sysCode_expanded = skip(sysCode_start) + sysCode_innerList + skip(code_end) + maybe(specifier_list) >> unarg(Make.specifierUnroll)
sysCode_elem = sysCode + maybe(specifier_list) >> unarg(Make.specifierUnroll)
sysCode_il_nospec = oneplus((sysCode_range | sysCode_listElem) + skip(maybe(comma)))
sysCode_nospecifier = skip(sysCode_start) + sysCode_il_nospec + skip(code_end)
# Indicator Codes
indCode_start = tokenType('IndicatorStart')
indCode_number = numberToken >> Make.indCode_number
indCode_range = (indCode_number | unString) + skip(dash) + (number | unString) >> Make.indCode_range
indCode_listElemTag = unString >> Make.indCode
indCode_listElem = (indCode_number | indCode_listElemTag)
indCode_specifier = (indCode_range | indCode_listElem) + maybe(specifier_list) >> unarg(Make.specifierUnroll)
indCode_innerList = oneplus(indCode_specifier + skip(maybe(comma))) >> flatten
indCode_expanded = skip(indCode_start) + indCode_innerList + skip(code_end) + maybe(specifier_list) >> unarg(Make.specifierUnroll)
indCode_elem = indCode + maybe(specifier_list) >> unarg(Make.specifierUnroll)
indCode_il_nospec = oneplus((indCode_range | indCode_listElem) + skip(maybe(comma)))
indCode_nospecifier = skip(indCode_start) + indCode_il_nospec + skip(code_end)
# USB Codes
usbCode_start = tokenType('USBCodeStart')
usbCode_number = numberToken >> Make.usbCode_number
usbCode_range = (usbCode_number | unString) + skip(dash) + (number | unString) >> Make.usbCode_range
usbCode_listElemTag = unString >> Make.usbCode
usbCode_listElem = (usbCode_number | usbCode_listElemTag)
usbCode_specifier = (usbCode_range | usbCode_listElem) + maybe(specifier_list) >> unarg(Make.specifierUnroll)
usbCode_il_nospec = oneplus((usbCode_range | usbCode_listElem) + skip(maybe(comma)))
usbCode_nospecifier = skip(usbCode_start) + usbCode_il_nospec + skip(code_end)
usbCode_innerList = oneplus(usbCode_specifier + skip(maybe(comma))) >> flatten
usbCode_expanded = skip(usbCode_start) + usbCode_innerList + skip(code_end) + maybe(specifier_list) >> unarg(Make.specifierUnroll)
usbCode_elem = usbCode + maybe(specifier_list) >> unarg(Make.specifierUnroll)
# HID Codes
hidCode_elem = usbCode_expanded | usbCode_elem | sysCode_expanded | sysCode_elem | consCode_expanded | consCode_elem | indCode_expanded | indCode_elem
# Layers
layer_start = tokenType('LayerStart')
layer_range = (number) + skip(dash) + (number) >> unarg(Make.range)
layer_listElem = number >> listElem
layer_innerList = oneplus((layer_range | layer_listElem) + skip(maybe(comma))) >> flatten
layer_expanded = layer_start + layer_innerList + skip(code_end) + maybe(specifier_list) >> unarg(Make.layerTypeIdent) >> unarg(Make.specifierUnroll)
# Generic Triggers
gtrigger_start = tokenType('TriggerStart')
gtrigger_parts = skip(gtrigger_start) + number + skip(comma) + number + skip(code_end) + maybe(specifier_list)
gtrigger_expanded = gtrigger_parts >> unarg(Make.genericTriggerIdent) >> unarg(Make.specifierUnroll)
# Pixels
pixel_start = tokenType('PixelStart')
pixel_range = (number) + skip(dash) + (number) >> unarg(Make.range) >> Make.pixel_address
pixel_listElem = number >> listElem >> Make.pixel_address
pixel_pos = (colRowOperator('c:') | colRowOperator('r:')) + (neg_number | percent) >> Make.pixel_address
pixel_posRel = (relCROperator('c:i+') | relCROperator('c:i-') | relCROperator('r:i+') | relCROperator('r:i-')) + (neg_number | percent) >> Make.pixel_address
pixel_posRelHere = (relCROperator('c:i') | relCROperator('r:i')) >> Make.pixel_address
pixel_posMerge = oneplus((pixel_pos | pixel_posRel | pixel_posRelHere) + skip(maybe(comma))) >> flatten >> Make.pixel_address_merge
pixel_innerList = ((oneplus((pixel_range | pixel_listElem | pixel_posMerge) + skip(maybe(comma))) >> flatten) | (pixel_posMerge)) >> Make.pixel_list
pixel_expanded = skip(pixel_start) + pixel_innerList + skip(code_end)
pixel_elem = pixel >> listElem >> Make.pixel_address
# Pixel Layer
pixellayer_start = tokenType('PixelLayerStart')
pixellayer_range = (number) + skip(dash) + (number) >> unarg(Make.range)
pixellayer_listElem = number >> listElem
pixellayer_innerList = oneplus((pixellayer_range | pixellayer_listElem) + skip(maybe(comma))) >> flatten >> Make.pixelLayer_list
pixellayer_expanded = skip(pixellayer_start) + pixellayer_innerList + skip(code_end)
pixellayer_elem = pixelLayer >> listElem
# Pixel Channels
pixelchan_chans = many(number + skip(operator(':')) + number + skip(maybe(comma)))
pixelchan_elem = ((pixel_expanded | pixel_elem) + skip(parenthesis('(')) + pixelchan_chans + skip(parenthesis(')'))) >> unarg(Make.pixelchan)
# HID Id for Pixel Mods
pixelmod_hid_elem = (usbCode | sysCode | consCode | indCode | scanCode) >> listElem
pixelmod_hid = pixelmod_hid_elem | usbCode_nospecifier | scanCode_nospecifier | consCode_nospecifier | sysCode_nospecifier | indCode_nospecifier
# Pixel Mods
pixelmod_modop = maybe(pixelOperator | plus | dash) >> listElem
pixelmod_modva = number >> listElem
pixelmod_mods = oneplus((pixelmod_modop + pixelmod_modva + skip(maybe(comma))) >> flatten)
pixelmod_layer = (pixellayer_expanded | pixellayer_elem)
pixelmod_index = (pixel_expanded | pixel_elem | pixelmod_hid | pixelmod_layer)
pixelmod_elem = pixelmod_index + skip(parenthesis('(')) + pixelmod_mods + skip(parenthesis(')')) >> unarg(Make.pixelmod)
# Pixel Capability
pixel_capability = pixelmod_elem
# Animations
animation_start = tokenType('AnimationStart')
animation_name = name
animation_frame_range = (number) + skip(dash) + (number) >> unarg(Make.range)
animation_name_frame = many((animation_frame_range | number) + skip(maybe(comma))) >> maybeFlatten
animation_def = skip(animation_start) + animation_name + skip(code_end) >> Make.animation
animation_expanded = skip(animation_start) + animation_name + skip(maybe(comma)) + animation_name_frame + skip(code_end) >> unarg(Make.animationAssociation)
animation_trigger = skip(animation_start) + animation_name + skip(code_end) + maybe(specifier_list) >> unarg(Make.animationTrigger) >> unarg(Make.specifierUnroll)
animation_flattened = animation_expanded >> flatten >> flatten
animation_elem = animation
# Animation Modifier
animation_modifier_arg = number | (name + skip(parenthesis('(')) + many(number + skip(maybe(comma))) + skip(parenthesis(')'))) | name
animation_modifier = many((name | number) + maybe(skip(operator(':')) + animation_modifier_arg) + skip(maybe(comma)))
animation_modlist = animation_modifier >> Make.animationModlist
# Animation Capability
animation_capability = ((animation_def | animation_elem) + maybe(skip(parenthesis('(')) + animation_modifier + skip(parenthesis(')')))) >> unarg(Make.animationCapability)
# Capabilities
capFunc_argument = (maybe(dash) + number) >> Make.capArgValue # TODO Allow for symbolic arguments, i.e. arrays and variables
capFunc_arguments = many(capFunc_argument + skip(maybe(comma)))
capFunc_elem = name + skip(parenthesis('(')) + capFunc_arguments + skip(parenthesis(')')) >> unarg(Make.capUsage) >> listElem
capFunc_combo = oneplus((hidCode_elem | capFunc_elem | animation_capability | pixel_capability | layer_expanded) + skip(maybe(plus))) >> listElem
capFunc_sequence = oneplus((capFunc_combo | seqStringR) + skip(maybe(comma))) >> oneLayerFlatten
# Trigger / Result Codes
triggerCode_combo = oneplus((scanCode_combo_elem | hidCode_elem | layer_expanded | animation_trigger | gtrigger_expanded) + skip(maybe(plus))) >> listElem
triggerCode_sequence = oneplus((triggerCode_combo | seqStringL | seqStringR) + skip(maybe(comma))) >> oneLayerFlatten
triggerCode_outerList = triggerCode_sequence >> optionExpansion
resultCode_outerList = ((capFunc_sequence >> optionExpansion) | none)
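# Putting the trigger/result rules together (illustrative KLL expressions these
# combinators are built to parse; the ':' mapping operator itself is split off
# earlier, during operation classification):
#   S0x10 : U"A";                - scan code trigger, USB code result
#   S0x10 + S0x11 : U"C";        - combo trigger (elements joined by +)
#   S0x10, S0x11 : U"A", U"B";   - sequence trigger and sequence result
#   S0x10 : None;                - noneOut result (resultCode_outerList | none)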
# Positions
position_list = oneplus(position + skip(maybe(comma)))
kll/common/position.py
#!/usr/bin/env python3
'''
KLL Position Containers
'''
# Copyright (C) 2016-2017 by Jacob Alexander
#
# This file is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This file is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this file. If not, see <http://www.gnu.org/licenses/>.
### Imports ###
### Decorators ###
# Print Decorator Variables
ERROR = '\033[5;1;31mERROR\033[0m:'
WARNING = '\033[5;1;33mWARNING\033[0m:'
### Classes ###
class Position:
'''
Identifier position
Each position can have up to 6 different types of measurements
Distance:
x
y
z
Angular:
rx
ry
rz
'''
_parameters = ['x', 'y', 'z', 'rx', 'ry', 'rz']
x = None
y = None
z = None
rx = None
ry = None
rz = None
def __init__(self):
# Set all the _parameters to None
for param in self._parameters:
setattr(self, param, None)
def positionSet(self):
'''
Returns True if any position has been set
'''
for param in self._parameters:
if getattr(self, param) is not None:
return True
return False
def isPositionSet(self):
'''
Check if a position is set
@return: True if any position is not None
'''
for param in self._parameters:
value = getattr(self, param)
if value is not None:
return True
return False
def setPosition(self, positions):
'''
Applies given list of position measurements
None signifies an undefined position which may be assigned at a later point.
Otherwise, it will be set to 0 at a later stage
If a position is already set, do not overwrite, expressions are read inside->out
'''
for position in positions:
name = position[0]
value = position[1]
# Check to make sure parameter is valid
if name not in self._parameters:
print("{0} '{1}' is not a valid position parameter.".format(ERROR, name))
continue
# Only set if None
if getattr(self, name) is None:
setattr(self, name, value)
def updatePositions(self, position):
'''
Using another Position object update positions
All positions are overwritten, unless set to None in the new position set
@param position: Position object with new positions
'''
for param in position._parameters:
value = getattr(position, param)
if value is not None:
setattr(self, param, value)
def strPosition(self):
'''
__repr__ of Position when multiple inheritance is used
'''
output = ""
# Check each of the position parameters, only show the ones that are not None
count = 0
for param in self._parameters:
value = getattr(self, param)
if value is not None:
if count > 0:
output += ","
output += "{0}:{1}".format(param, value)
count += 1
return output
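# Usage sketch for the Position container above (doctest-style, matching the
# repr produced by strPosition):
#
#   >>> p = Position()
#   >>> p.setPosition([('x', 10), ('y', 5.5)])
#   >>> p
#   x:10,y:5.5
#   >>> p.isPositionSet()
#   True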
def json(self):
'''
JSON representation of Position
'''
# TODO (HaaTa) Add
return {}
def __repr__(self):
return self.strPosition()
kll/common/schedule.py
#!/usr/bin/env python3
'''
KLL Schedule Containers
'''
# Copyright (C) 2016-2018 by Jacob Alexander
#
# This file is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This file is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this file. If not, see <http://www.gnu.org/licenses/>.
### Imports ###
import numbers
### Decorators ###
# Print Decorator Variables
ERROR = '\033[5;1;31mERROR\033[0m:'
WARNING = '\033[5;1;33mWARNING\033[0m:'
### Classes ###
class Time:
'''
Time parameter
'''
def __init__(self, time, unit):
self.time = time
self.unit = unit
def __repr__(self):
return "{0}{1}".format(self.time, self.unit)
class Schedule:
'''
Identifier schedule
Each schedule may have multiple parameters configuring how the element is scheduled
Used for trigger and result elements
'''
def __init__(self):
self.parameters = None
def setSchedule(self, parameters):
'''
Applies given list of Schedule Parameters to Schedule
None signifies an undefined schedule which allows free-form scheduling
at either a later stage or at the convenience of the device firmware/driver
If schedule is already set, do not overwrite, expressions are read inside->out
'''
# Ignore if already set
if self.parameters is not None or parameters is None:
return
# Morph parameter based on Schedule type
for param in parameters:
param.setType(self)
param.checkParam()
self.parameters = parameters
def strSchedule(self, kll=False):
'''
__repr__ of Schedule when multiple inheritance is used
'''
output = ""
if self.parameters is not None:
for index, param in enumerate(self.parameters):
if index > 0:
output += ","
output += "{0}".format(param.kllify())
return output
def json(self):
'''
JSON representation of Schedule
'''
output = dict()
output['schedule'] = []
if self.parameters is not None:
for param in self.parameters:
output['schedule'].append(param.json())
return output
def kllify(self):
'''
KLL representation of object
'''
return self.strSchedule(kll=True)
def __repr__(self):
return self.strSchedule()
class ScheduleParam:
'''
Schedule parameter
In the case of a Timing parameter, the base type is unknown and must be inferred later
'''
def __init__(self, state, timing=None):
'''
@param state: State identifier (string)
@param timing: Timing parameter
'''
self.state = state
self.timing = timing
self.parent = None
def setType(self, parent):
'''
Change class type to match the Schedule object
@param parent: Parent Schedule object
'''
self.parent = parent
if self.parent.__class__.__name__ in ["HIDId"] and self.parent.type == 'IndCode':
self.__class__ = IndicatorScheduleParam
elif self.parent.__class__.__name__ in ["LayerId"]:
self.__class__ = IndicatorScheduleParam
elif self.parent.__class__.__name__ in ["HIDId", "ScanCodeId", "TriggerId"]:
# Check if an analog
if isinstance(self.state, numbers.Number):
self.__class__ = AnalogScheduleParam
else:
self.__class__ = ButtonScheduleParam
elif self.parent.__class__.__name__ in ["AnimationId"]:
self.__class__ = AnimationScheduleParam
def checkParam(self):
'''
Validate that parameter is valid
@returns: True if the assigned state is valid for the assigned class
'''
# Check for invalid state
invalid_state = True
if self.state in ['P', 'H', 'R', 'O', 'UP', 'UR'] and self.__class__.__name__ == 'ButtonScheduleParam':
invalid_state = False
elif self.state in ['A', 'On', 'D', 'Off'] and self.__class__.__name__ == 'IndicatorScheduleParam':
invalid_state = False
elif self.state in ['D', 'R', 'O'] and self.__class__.__name__ == 'AnimationScheduleParam':
invalid_state = False
elif isinstance(self.state, numbers.Number) and self.__class__.__name__ == 'AnalogScheduleParam':
invalid_state = False
elif self.state is None and self.timing is not None:
invalid_state = False
if invalid_state:
print("{0} Invalid {2} state '{1}'".format(ERROR, self.state, self.__class__.__name__))
return not invalid_state
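# Example of the morph-then-validate flow above (a sketch; HIDId is assumed to
# come from kll/common/id.py and behave as checked in setType):
#   - ScheduleParam('P', Time(100.0, 'ms')) attached to a USBCode HIDId becomes
#     a ButtonScheduleParam, and checkParam() accepts 'P'.
#   - The same parameter attached to an IndCode HIDId becomes an
#     IndicatorScheduleParam, and checkParam() reports 'P' as an invalid state.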
def setTiming(self, timing):
'''
Set parameter timing
'''
self.timing = timing
def json(self):
'''
JSON representation of ScheduleParam
'''
output = dict()
output['state'] = self.state
output['timing'] = self.timing
return output
def kllify(self):
'''
KLL representation of ScheduleParam object
'''
output = ""
if self.state is None and self.timing is not None:
output += "{0}".format(self.timing)
return output
def __repr__(self):
output = ""
if self.state is None and self.timing is not None:
output += "{0}".format(self.timing)
else:
output += "??"
print("{0} Unknown ScheduleParam state '{1}'".format(ERROR, self.state))
return output
class ButtonScheduleParam(ScheduleParam):
'''
Button Schedule Parameter
Accepts:
P - Press
H - Hold
R - Release
O - Off
UP - Unique Press
UR - Unique Release
Timing specifiers are valid.
Validity of specifiers is context dependent, and may error at a later stage, or be stripped altogether
'''
def __repr__(self):
output = ""
if self.state is not None:
output += "{0}".format(self.state)
if self.state is not None and self.timing is not None:
output += ":"
if self.timing is not None:
output += "{0}".format(self.timing)
return output
def kllify(self):
'''
KLL representation of object
'''
return "{0}".format(self)
class AnalogScheduleParam(ScheduleParam):
'''
Analog Schedule Parameter
Accepts:
Value from 0 to 100, indicating a percentage pressed
XXX: Might be useful to accept decimal percentages
'''
def __repr__(self):
output = ""
if self.state is not None:
output += "{0}".format(self.state)
if self.state is not None and self.timing is not None:
output += ":"
if self.timing is not None:
output += "{0}".format(self.timing)
return output
def kllify(self):
'''
KLL representation of object
'''
return "{0}".format(self.state)
class IndicatorScheduleParam(ScheduleParam):
'''
Indicator Schedule Parameter
Accepts:
A - Activate
On
D - Deactivate
Off
Timing specifiers are valid.
Validity of specifiers is context dependent, and may error at a later stage, or be stripped altogether
'''
def __repr__(self):
output = ""
if self.state is not None:
output += "{0}".format(self.state)
if self.state is not None and self.timing is not None:
output += ":"
if self.timing is not None:
output += "{0}".format(self.timing)
return output
def kllify(self):
'''
KLL representation of object
'''
return "{0}".format(self)
class AnimationScheduleParam(ScheduleParam):
'''
Animation Schedule Parameter
Accepts:
D - Done
R - Repeat
O - Off
Timing specifiers are valid.
Validity of specifiers is context dependent, and may error at a later stage, or be stripped altogether
'''
def __repr__(self):
output = ""
if self.state is not None:
output += "{0}".format(self.state)
if self.state is not None and self.timing is not None:
output += ":"
if self.timing is not None:
output += "{0}".format(self.timing)
return output
def kllify(self):
'''
KLL representation of object
'''
return "{0}".format(self)
kll/common/stage.py
#!/usr/bin/env python3
'''
KLL Compiler Stage Definitions
'''
# Copyright (C) 2016-2018 by Jacob Alexander
#
# This file is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This file is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this file. If not, see <http://www.gnu.org/licenses/>.
### Imports ###
from multiprocessing.dummy import Pool as ThreadPool
import copy
import io
import multiprocessing
import os
import re
import sys
import tempfile
import kll.common.context as context
import kll.common.expression as expression
import kll.common.file as file
import kll.common.id as id
import kll.emitters.emitters as emitters
from kll.extern.funcparserlib.lexer import make_tokenizer, Token, LexerError
from kll.extern.funcparserlib.parser import many, oneplus, maybe, skip, NoParseError, Parser_debug
from layouts import Layouts, Layout
### Decorators ###
# Print Decorator Variables
ERROR = '\033[5;1;31mERROR\033[0m:'
WARNING = '\033[5;1;33mWARNING\033[0m:'
ansi_escape = re.compile(r'\x1b[^m]*m')
### Classes ###
class ControlStage:
'''
Top-level Stage
Controls the order in which each stage is processed
'''
def __init__(self):
'''
Initialize stage objects and control variables
'''
# Initialized in process order
# NOTE: Only unique classes in this list, otherwise stage() will get confused
self.stages = [
CompilerConfigurationStage(self),
FileImportStage(self),
PreprocessorStage(self),
OperationClassificationStage(self),
OperationSpecificsStage(self),
OperationOrganizationStage(self),
DataOrganizationStage(self),
DataFinalizationStage(self),
DataAnalysisStage(self),
CodeGenerationStage(self),
#ReportGenerationStage( self ),
]
self.git_rev = None
self.git_changes = None
self.version = None
def stage(self, context_str):
'''
Returns the stage object of the associated string name of the class
@param context_str: String name of the class of the stage e.g. CompilerConfigurationStage
'''
return [stage for stage in self.stages if type(stage).__name__ == context_str][0]
def command_line_args(self, args):
'''
Capture command line arguments for each processing stage
@param args: Name space of processed arguments
'''
for stage in self.stages:
stage.command_line_args(args)
def command_line_flags(self, parser):
'''
Prepare group parser for each processing stage
@param parser: argparse setup object
'''
for stage in self.stages:
stage.command_line_flags(parser)
def process(self):
'''
Main processing section
Initializes each stage in order.
Each stage must complete before the next one begins.
'''
# Run report even if stage doesn't complete
run_report = False
for stage in self.stages:
stage.process()
# Make sure stage has successfully completed
if stage.status() != 'Completed':
print("{0} Invalid stage status '{1}' for '{2}'.".format(
ERROR,
stage.status(),
stage.__class__.__name__,
))
run_report = True
break
# Only need to explicitly run reports if there was a stage problem
# Otherwise reports are run automatically
if run_report:
# TODO
sys.exit(1)
class Stage:
'''
Base Stage Class
'''
def __init__(self, control):
'''
Stage initialization
@param control: ControlStage object, used to access data from other stages
'''
self.control = control
self.color = False
self._status = 'Queued'
def command_line_args(self, args):
'''
Group parser for command line arguments
@param args: Name space of processed arguments
'''
print("{0} '{1}' '{2}' has not been implemented yet"
.format(
WARNING,
self.command_line_args.__name__,
type(self).__name__
)
)
def command_line_flags(self, parser):
'''
Group parser for command line options
@param parser: argparse setup object
'''
print("{0} '{1}' '{2}' has not been implemented yet"
.format(
WARNING,
self.command_line_flags.__name__,
type(self).__name__
)
)
def process(self):
'''
Main processing section
'''
self._status = 'Running'
print("{0} '{1}' '{2}' has not been implemented yet"
.format(
WARNING,
self.process.__name__,
type(self).__name__
)
)
self._status = 'Completed'
def status(self):
'''
Returns the current status of the Stage
Values:
Queued - Not yet run
Running - Currently running
Completed - Successfully completed
Incomplete - Unsuccessfully completed
'''
return self._status
class CompilerConfigurationStage(Stage):
'''
Compiler Configuration Stage
* Does initial setup of KLL compiler.
* Handles any global configuration that must be done before parsing can begin
'''
def __init__(self, control):
'''
Initialize compiler configuration variables
'''
super().__init__(control)
self.color = "auto"
self.jobs = multiprocessing.cpu_count()
self.pool = None
# Build list of emitters
self.emitters = emitters.Emitters(control)
self.emitter = self.emitters.emitter_default()
def command_line_args(self, args):
'''
Group parser for command line arguments
@param args: Name space of processed arguments
'''
self.emitter = args.emitter
self.color = args.color
self.jobs = args.jobs
# Validate color argument before processing
if self.color not in ['auto', 'always', 'never']:
print("Invalid color option '{0}'".format(self.color))
sys.exit(2)
# TODO Detect whether colorization should be used
self.color = self.color in ['auto', 'always']
# Validate if it's a valid emitter
if self.emitter not in self.emitters.emitter_list():
print("{0} Invalid emitter '{1}'".format(ERROR, self.emitter))
print("Valid emitters: {0}".format(self.emitters.emitter_list()))
sys.exit(2)
def command_line_flags(self, parser):
'''
Group parser for command line options
@param parser: argparse setup object
'''
# Create new option group
group = parser.add_argument_group('\033[1mCompiler Configuration\033[0m')
# Optional Arguments
group.add_argument('--emitter', type=str, default=self.emitter,
help="Specify target emitter for the KLL compiler.\n"
"\033[1mDefault\033[0m: {0}\n"
"\033[1mOptions\033[0m: {1}".format(self.emitter, self.emitters.emitter_list())
)
group.add_argument('--color', type=str, default=self.color,
help="Specify debug colorizer mode.\n"
"\033[1mDefault\033[0m: {0}\n"
"\033[1mOptions\033[0m: auto, always, never (auto attempts to detect support)".format(self.color)
)
group.add_argument('--jobs', type=int, default=self.jobs,
help="Specify max number of threads to use.\n"
"\033[1mDefault\033[0m: {0}".format(self.jobs)
)
def process(self):
'''
Compiler Configuration Processing
'''
self._status = 'Running'
# Initialize thread pool
self.pool = ThreadPool(self.jobs)
self._status = 'Completed'
class FileImportStage(Stage):
'''
File Import Stage
* Loads text of all files into memory
* Does initial sorting of KLL Contexts based upon command line arguments
'''
def __init__(self, control):
'''
Initialize file storage datastructures and variables
'''
super().__init__(control)
# These lists are order sensitive
self.generic_files = []
self.config_files = []
self.base_files = []
self.default_files = []
# This is a list of lists, each sub list is another layer in order from 1 to max
self.partial_files = []
# List of all files contained in KLLFile objects
self.kll_files = []
def command_line_args(self, args):
'''
Group parser for command line arguments
@param args: Name space of processed arguments
'''
self.generic_files = args.generic
self.config_files = args.config
self.base_files = args.base
self.default_files = args.default
self.partial_files = args.partial
def command_line_flags(self, parser):
'''
Group parser for command line options
@param parser: argparse setup object
'''
# Create new option group
group = parser.add_argument_group('\033[1mFile Context Configuration\033[0m')
# Positional Arguments
group.add_argument('generic', type=str, nargs='*', default=self.generic_files,
help="Auto-detect context of .kll files, defaults to a base map configuration."
)
# Optional Arguments
group.add_argument('--config', type=str, nargs='+', default=self.config_files,
help="Specify base configuration .kll files, earliest priority"
)
group.add_argument('--base', type=str, nargs='+', default=self.base_files,
help="Specify base map configuration, applied after config .kll files.\n"
"The base map is applied prior to all default and partial maps and is used as the basis for them."
)
group.add_argument('--default', type=str, nargs='+', default=self.default_files,
help="Specify .kll files to layer on top of the default map to create a combined map.\n"
"Also known as layer 0."
)
group.add_argument('--partial', type=str, nargs='+', action='append', default=self.partial_files,
help="Specify .kll files to generate partial map, multiple files per flag.\n"
"Each -p defines another partial map.\n"
"Base .kll files (that define the scan code maps) must be defined for each partial map."
)
def init_kllfile(self, path, file_context):
'''
Prepares a KLLFile object with the given context
@param path: Path to the KLL file
@param file_context: Type of file context, e.g. DefaultMapContext
'''
return file.KLLFile(path, file_context)
def process(self):
'''
Process each of the files, sorting them by command line argument context order
'''
self._status = 'Running'
# Determine colorization setting
self.color = self.control.stage('CompilerConfigurationStage').color
# Process each type of file
# Iterates over each file in the context list and creates a KLLFile object with a context and path
self.kll_files += map(
lambda path: self.init_kllfile(path, context.GenericContext()),
self.generic_files
)
self.kll_files += map(
lambda path: self.init_kllfile(path, context.ConfigurationContext()),
self.config_files
)
self.kll_files += map(
lambda path: self.init_kllfile(path, context.BaseMapContext()),
self.base_files
)
self.kll_files += map(
lambda path: self.init_kllfile(path, context.DefaultMapContext()),
self.default_files
)
# Partial Maps require a third parameter which specifies which layer it's in
for layer, files in enumerate(self.partial_files):
self.kll_files += map(
lambda path: self.init_kllfile(path, context.PartialMapContext(layer)),
files
)
# Validate that all the file paths exist, exit if any of the checks fail
if False in [path.check() for path in self.kll_files]:
self._status = 'Incomplete'
return
# Now that we have a full list of files and their given context, we can now read the files into memory
# Uses the thread pool to speed up processing
# Make sure processing was successful before continuing
pool = self.control.stage('CompilerConfigurationStage').pool
if False in pool.map(lambda kll_file: kll_file.read(), self.kll_files):
self._status = 'Incomplete'
return
self._status = 'Completed'
class PreprocessorStage(Stage):
'''
Preprocessor Stage
* Does initial split and decision of contexts
* Handles Preprocessor part of KLL
'''
def __init__(self, control):
'''
Initialize preprocessor configuration variables
'''
super().__init__(control)
self.preprocessor_debug = False
self.max_scan_code = [0]
self.min_scan_code = [0]
self.interconnect_scancode_offsets = [0]
self.kll_files = []
self.layout_mgr = None
self.layout_list = []
self.processed_save_path = "{temp}/kll".format(temp=tempfile.gettempdir())
def command_line_args(self, args):
'''
Group parser for command line arguments
@param args: Name space of processed arguments
'''
self.preprocessor_debug = args.preprocessor_debug
self.processed_save_path = args.preprocessor_tmp_path
def command_line_flags(self, parser):
'''
Group parser for command line options
@param parser: argparse setup object
'''
# Create new option group
group = parser.add_argument_group('\033[1mPreprocessor Configuration\033[0m')
# Optional Arguments
group.add_argument('--preprocessor-tmp-path', type=str, default=self.processed_save_path,
help="Work directory for preprocessor.\n"
"\033[1mDefault\033[0m: {0}\n".format(self.processed_save_path)
)
group.add_argument('--preprocessor-debug', action='store_true', default=self.preprocessor_debug,
help="Enable debug output in the preprocessor."
)
def seed_context(self, kll_file):
'''
Build list of context
TODO Update later for proper preprocessor
Adds data from KLLFile into the Context
'''
kll_file.context.initial_context(kll_file.lines, kll_file.data, kll_file)
def apply_connect_ids(self):
'''
Uses computed connect_ids to apply to BaseMaps
Incoming order of the KLLFiles matters
Ignores other contexts
'''
current_id = 0
for kll_file in self.kll_files:
# Only applicable for BaseMapContext
if kll_file.context.__class__.__name__ == "BaseMapContext":
# Only update the current_id if it was set (not every file will have it set)
if kll_file.connect_id is not None:
current_id = kll_file.connect_id
kll_file.context.connect_id = current_id
# Otherwise, set as 0
else:
kll_file.context.connect_id = 0
def process_connect_ids(self, kll_file, apply_offsets):
lines = kll_file.data.splitlines()
# Basic Tokens Spec
# TODO Storing these somewhere central might be a reasonable idea
spec = [
('Comment', (r' *#.*', )),
('ScanCode', (r'S((0x[0-9a-fA-F]+)|([0-9]+))', )),
('Operator', (r'=>|<=|i:\+|i:-|i::|i:|:\+|:-|::|:|=', )),
('USBCode', (r'U(("[^"]+")|(0x[0-9a-fA-F]+)|([0-9]+))', )),
('NumberBase10', (r'(([1-9][0-9]*))', )),
('Number', (r'-?((0x[0-9a-fA-F]+)|(0|([1-9][0-9]*)))', )),
('Name', (r'[A-Za-z_][A-Za-z_0-9]*', )),
('Misc', (r'.', )), # Everything else
]
# Tokens to filter out of the token stream
useless = ['Misc']
# Build tokenizer that appends unknown characters to Misc Token groups
# NOTE: This is technically slower processing wise, but allows for multi-stage tokenization
# Which in turn allows for parsing and tokenization rules to be simplified
tokenizer = make_tokenizer(spec)
# default Locale when un-defined
hid_mapping_name = 'default'
try:
most_recent_offset = 0
processed_lines = []
for line in lines:
tokens = [x for x in tokenizer(line) if x.type not in useless]
for l_element, mid_element, r_element in zip(tokens[0::3], tokens[1::3], tokens[2::3]):
# Look for HIDMapping variable
# If set, apply to context
# Also verify that the HIDMapping is valid
if (
l_element.value == "HIDMapping"
and
mid_element.value == "="
):
# Make sure this is a valid mapping
assert r_element.value in self.layout_list
# Set HID mapping for KLL Context
hid_mapping_name = r_element.value
# Preprocessor tag for offsetting the scancodes by a fixed amount
# This makes it easy to apply offsets to older files
# The scope for this term is for the current file
if (
l_element.value == "ScanCodeOffset"
and
mid_element.value == "="
and
r_element.type == "NumberBase10"
):
most_recent_offset = int(r_element.value)
# Preprocessor definition for the connectId
# TODO these should likely be defined in their own file somewhere else
if (
l_element.value == "ConnectId"
and
mid_element.value == "="
and
r_element.type == "NumberBase10"
):
self.most_recent_connect_id = int(r_element.value)
assert (self.most_recent_connect_id >= 0)
if self.preprocessor_debug:
print("Found connect ID! %s" % self.most_recent_connect_id)
if not apply_offsets:
# Set connect_id
kll_file.connect_id = self.most_recent_connect_id
# Making sure that the offsets exist
while (len(self.min_scan_code) <= self.most_recent_connect_id):
self.min_scan_code.append(sys.maxsize)
while (len(self.max_scan_code) <= self.most_recent_connect_id):
self.max_scan_code.append(0)
if apply_offsets:
assert (len(self.min_scan_code) > self.most_recent_connect_id)
assert (len(self.max_scan_code) > self.most_recent_connect_id)
assert (len(self.interconnect_scancode_offsets) > self.most_recent_connect_id)
if (
l_element.type == "ScanCode"
and
mid_element.value == ":"
and
r_element.type == "USBCode"
):
scan_code_int = int(l_element.value[1:], 0)
if not apply_offsets:
# Checking if the min/max values need to be updated. The values are guaranteed to exist
# in the previous step
if scan_code_int < self.min_scan_code[self.most_recent_connect_id]:
self.min_scan_code[self.most_recent_connect_id] = scan_code_int
if scan_code_int > self.max_scan_code[self.most_recent_connect_id]:
self.max_scan_code[self.most_recent_connect_id] = scan_code_int
if apply_offsets:
# Modifying the current line
# The result is determined by the scancode, the interconnect offset and the preprocessor
# term for offset
scan_code_with_offset = (
scan_code_int +
self.interconnect_scancode_offsets[self.most_recent_connect_id] +
most_recent_offset
)
scan_code_with_offset_hex = "0x{:X}".format(scan_code_with_offset)
original_scancode_converted_hex = "0x{:X}".format(scan_code_int)
# Sanity checking if we are doing something wrong
if int(original_scancode_converted_hex, 16) != int(l_element.value[1:], 0):
print(
"{type} We might be converting the scancodes wrong."
" Original code: {original}, the converted code"
" {converted}".format(
type=ERROR,
original=l_element.value[1:],
converted=original_scancode_converted_hex
)
)
# Replacing the original scancode in the line
old_line = str(line)
line = line.replace(l_element.value[1:], scan_code_with_offset_hex)
if self.preprocessor_debug:
print("Applying offset {}".format(
self.interconnect_scancode_offsets[self.most_recent_connect_id]
))
print(
"Old line: {old_line}\n"
"Replacing {old_element} with"
"{new_element}".format(
old_line=old_line,
old_element=l_element.value[1:],
new_element=scan_code_with_offset_hex
)
)
print("New line: {}\n".format(line))
processed_lines.append(line)
except LexerError as err:
print(err)
print("{0} {1}:tokenize -> {2}:{3}".format(
ERROR,
self.__class__.__name__,
kll_file.path,
err.place[0],
))
# Set HID Mapping in context
kll_file.context.hid_mapping = self.layout_mgr.get_layout(hid_mapping_name)
# Applying the offsets to the kll objects, if appropriate
if apply_offsets:
new_data = os.linesep.join(processed_lines)
kll_file.data = new_data
kll_file.lines = processed_lines
def determine_scancode_offsets(self):
# Sanity check the min/max codes
assert (len(self.min_scan_code) == len(self.max_scan_code))
# Doing the actual work
self.interconnect_scancode_offsets = []
previous_max_offset = 0
for scancode_offset_for_id in self.max_scan_code:
self.interconnect_scancode_offsets.append(previous_max_offset)
previous_max_offset += scancode_offset_for_id
self.interconnect_scancode_offsets.append(previous_max_offset)
if self.preprocessor_debug:
print("Scancode offsets: {0}".format(self.interconnect_scancode_offsets))
def import_data_from_disk(self, kll_files):
for kll_file in kll_files:
kll_file.read()
def export_data_to_disk(self, kll_files):
paths = []
for kll_file in kll_files:
paths.append(kll_file.path)
common_path = os.path.commonprefix(paths)
for kll_file in kll_files:
# Outputting the file to disk, with a different filename
file_prefix = os.path.dirname(kll_file.path)
file_prefix = file_prefix.replace(common_path, "")
file_prefix = file_prefix.replace("\\", "_")
file_prefix = file_prefix.replace("/", "_")
base_filename = kll_file.filename()
# Handle multiple dots across multiple versions of Python 3
splits = base_filename.split(".")
extension = splits[-1]
filename = ".".join(splits[0:-1])
processed_filename = "{prefix}@{filename}_processed.{extension}".format(
prefix=file_prefix,
filename=filename,
extension=extension
)
if self.preprocessor_debug:
print("Processed filename: %s" % processed_filename)
output_filename = '{processed_dir}/{filename}'.format(
processed_dir=self.processed_save_path,
filename=processed_filename
)
kll_file.write(output_filename, self.preprocessor_debug)
kll_file.path = output_filename
def gather_scancode_offsets(self, kll_files):
self.most_recent_connect_id = 0
for kll_file in kll_files:
self.process_connect_ids(kll_file, apply_offsets=False)
def apply_scancode_offsets(self, kll_files):
self.most_recent_connect_id = 0
for kll_file in kll_files:
self.process_connect_ids(kll_file, apply_offsets=True)
def process(self):
'''
Preprocessor Execution
'''
self._status = 'Running'
# Determine colorization setting
self.color = self.control.stage('CompilerConfigurationStage').color
# Acquire thread pool
pool = self.control.stage('CompilerConfigurationStage').pool
# Build list of layouts
#self.layout_mgr = Layouts(layout_path='/home/hyatt/Source/layouts')
self.layout_mgr = Layouts()
self.layout_list = self.layout_mgr.list_layouts()
# TODO
# Once the KLL Spec has preprocessor commands, there may be a risk of infinite/circular dependencies
# Please add a non-invasive way to avoid/warn/stop in this case -HaaTa
# First, since initial contexts have been populated, populate details
# TODO
# This step will change once preprocessor commands have been added
# Simply, this just takes the imported file data (KLLFile) and puts it in the context container
self.kll_files = self.control.stage('FileImportStage').kll_files
self.import_data_from_disk(self.kll_files)
self.gather_scancode_offsets(self.kll_files)
self.determine_scancode_offsets()
#self.apply_scancode_offsets(self.kll_files) # XXX (HaaTa) not necessary anymore
self.export_data_to_disk(self.kll_files)
if False in pool.map(self.seed_context, self.kll_files):
self._status = 'Incomplete'
return
# Apply connect ids
self.apply_connect_ids()
# Next, tokenize and parse the preprocessor KLL commands.
# NOTE: This may result in having to create more KLL Contexts and tokenize/parse again numerous times over
# TODO
if self.preprocessor_debug:
print("Preprocessor determined Min ScanCodes: {0}".format(self.min_scan_code))
print("Preprocessor determined Max ScanCodes: {0}".format(self.max_scan_code))
print("Preprocessor determined ScanCode offsets: {0}".format(self.interconnect_scancode_offsets))
self._status = 'Completed'
class OperationClassificationStage(Stage):
'''
Operation Classification Stage
* Sorts operations by type based on operator
* Tokenizes only operator pivots and left/right arguments
* Further tokenization and parsing occurs at a later stage
'''
def __init__(self, control):
'''
Initialize operation classification stage
'''
super().__init__(control)
self.tokenized_data = []
self.contexts = []
def command_line_args(self, args):
'''
Group parser for command line arguments
@param args: Name space of processed arguments
'''
def command_line_flags(self, parser):
'''
Group parser for command line options
@param parser: argparse setup object
'''
# Create new option group
group = parser.add_argument_group('\033[1mOperation Classification Configuration\033[0m')
def merge_tokens(self, token_list, token_type):
'''
Merge list of tokens into a single token
@param token_list: List of tokens
@param token_type: String name of token type
'''
# Initial token parameters
ret_token = Token(token_type, '')
# Set start/end positions of token
ret_token.start = token_list[0].start
ret_token.end = token_list[-1].end
# Build token value
for token in token_list:
ret_token.value += token.value
return ret_token
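# Sketch of how merge_tokens is used below: the run of Misc/Space tokens that
# makes up "S0x10 " is collected into new_token and merged into a single
# Token('LOperatorData', 'S0x10 ') whose start/end span the originals, before
# the ':' Operator token is appended to classification_token_data.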
def tokenize(self, kll_context):
'''
Tokenize a single string
@param kll_context: KLL Context containing file data
'''
ret = True
# Basic Tokens Spec
spec = [
('Comment', (r' *#.*', )),
('Space', (r'[ \t]+', )),
('NewLine', (r'[\r\n]+', )),
# Tokens that will be grouped together after tokenization
# Ignored at this stage
# This is required to isolate the Operator tags
('Misc', (r'r?[xyz]:[0-9]+(.[0-9]+)?', )), # Position context
('Misc', (r'\([^\)]*\)', )), # Parenthesis context
('Misc', (r'\[[^\]]*\]', )), # Square bracket context
('Misc', (r'"[^"]*"', )), # Double quote context
('Misc', (r"'[^']*'", )), # Single quote context
('Operator', (r'=>|<=|i:\+|i:-|i::|i:|:\+|:-|::|:|=', )),
('EndOfLine', (r';', )),
# Everything else to be ignored at this stage
('Misc', (r'.', )), # Everything else
]
# Tokens to filter out of the token stream
#useless = [ 'Space', 'Comment' ]
useless = ['Comment', 'NewLine']
# Build tokenizer that appends unknown characters to Misc Token groups
# NOTE: This is technically slower processing wise, but allows for multi-stage tokenization
# Which in turn allows for parsing and tokenization rules to be simplified
tokenizer = make_tokenizer(spec)
# Tokenize and filter out useless tokens
try:
tokens = [x for x in tokenizer(kll_context.data) if x.type not in useless]
except LexerError as err:
print(err)
print("{0} {1}:tokenize -> {2}:{3}".format(
ERROR,
self.__class__.__name__,
kll_context.parent.path,
err.place[0],
))
return False
# Merge Misc tokens delimited by Operator and EndOfLine tokens
kll_context.classification_token_data = []
new_token = []
last_operator = None
for token in tokens:
# Check for delimiter, append new_token if ready
if token.type in ['EndOfLine', 'Operator']:
# Determine the token type
token_type = 'LOperatorData'
if token.type == 'EndOfLine':
token_type = 'ROperatorData'
# If this is a 'misplaced' operator, set as Misc
if token_type == last_operator:
token.type = 'Misc'
new_token.append(token)
continue
if len(new_token) > 0:
# Build new token
kll_context.classification_token_data.append(
self.merge_tokens(new_token, token_type)
)
new_token = []
kll_context.classification_token_data.append(token)
last_operator = token_type
# Collect Misc tokens
elif token.type in ['Misc', 'Space']:
new_token.append(token)
# Invalid token for this stage
else:
print("{0} Invalid token '{1}' for '{2}'".format(
ERROR,
token,
type(self).__name__,
))
ret = False
return ret
def sort(self, kll_context):
'''
Sorts tokenized data into expressions
LOperatorData + Operator + ROperatorData + EndOfLine
@param kll_context: KLL Context, contains tokenized data
'''
ret = True
def validate_token(token, token_type):
'''
Validate token
@param token: Given token to validate
@param token_type: Token type to validate against
@return True if the token is correct
'''
ret = token.type == token_type
# Error message
if not ret:
print("Expected: '{0}' got '{1}':{2} '{3}'".format(
token_type,
token.type,
token._pos_str(),
token.value,
))
return ret
tokens = kll_context.classification_token_data
for index in range(0, len(tokens), 4):
# Make sure enough tokens exist
if index + 3 >= len(tokens):
print("Not enough tokens left: {0}".format(tokens[index:]))
print("Expected: LOperatorData, Operator, ROperatorData, EndOfLine")
print("{0} {1}:sort -> {2}:{3}".format(
ERROR,
self.__class__.__name__,
kll_context.parent.path,
tokens[-1].start[0],
))
ret = False
break
# Validate the tokens are what was expected
ret = validate_token(tokens[index], 'LOperatorData') and ret
ret = validate_token(tokens[index + 1], 'Operator') and ret
ret = validate_token(tokens[index + 2], 'ROperatorData') and ret
ret = validate_token(tokens[index + 3], 'EndOfLine') and ret
# Append expression
kll_context.expressions.append(
expression.Expression(tokens[index], tokens[index + 1], tokens[index + 2], kll_context)
)
return ret
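# Illustrative token stream consumed by sort() above: after tokenize(), the
# line `S0x10 : U"A";` is reduced to four classification tokens,
#   LOperatorData('S0x10 '), Operator(':'), ROperatorData(' U"A"'), EndOfLine(';')
# which become a single Expression; malformed lines fail the validate_token
# checks and are reported against the file path.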
def process(self):
'''
Compiler Configuration Processing
'''
self._status = 'Running'
# Determine colorization setting
self.color = self.control.stage('CompilerConfigurationStage').color
# Acquire thread pool
pool = self.control.stage('CompilerConfigurationStage').pool
# Get list of KLLFiles
kll_files = self.control.stage('FileImportStage').kll_files
# Build list of contexts
self.contexts = [kll_file.context for kll_file in kll_files]
# Tokenize operators
# TODO
# Once preprocessor includes are implemented use a second kll_files list
# This way processing doesn't have to be recursive for a few stages -HaaTa
if False in pool.map(self.tokenize, self.contexts):
self._status = 'Incomplete'
return
# Sort elements into expressions
# LOperatorData + Operator + ROperatorData + EndOfLine
if False in pool.map(self.sort, self.contexts):
self._status = 'Incomplete'
return
self._status = 'Completed'
class OperationSpecificsStage(Stage):
'''
Operation Specifics Stage
* For each sorted operation, tokenize and parse the left/right arguments
* Data is stored with the operation, but no context is given to the data beyond the argument types
'''
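# Illustrative KLL forms handled by this stage, one per top-level operator family
# (the names and values below are hypothetical; only the operators are significant):
#   Name association:  myCapability => My_C_Function( arg : 1 );
#   Data association:  P[5] <= x:10, y:20;
#   Assignment:        MyVariable = "value";
#   Mapping:           S0x1B : U"A";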
def __init__(self, control):
'''
Initialize operation specifics stage
'''
super().__init__(control)
self.parser_debug = False
self.parser_token_debug = False
self.token_debug = False
def command_line_args(self, args):
'''
Group parser for command line arguments
@param args: Name space of processed arguments
'''
self.parser_debug = args.parser_debug
self.parser_token_debug = args.parser_token_debug
self.token_debug = args.token_debug
# Auto-set parser_debug if parser_token_debug is set
if self.parser_token_debug:
self.parser_debug = True
def command_line_flags(self, parser):
'''
Group parser for command line options
@param parser: argparse setup object
'''
# Create new option group
group = parser.add_argument_group('\033[1mOperation Specifics Configuration\033[0m')
# Optional Arguments
group.add_argument('--parser-debug', action='store_true', default=self.parser_debug,
help="Enable parser debug output.\n",
)
group.add_argument('--parser-token-debug', action='store_true', default=self.parser_token_debug,
help="Enable parser-stage token debug output.\n",
)
group.add_argument('--token-debug', action='store_true', default=self.token_debug,
help="Enable tokenization debug output.\n",
)
## Tokenizers ##
def tokenize_base(self, kll_expression, lspec, rspec):
'''
Base tokenization logic for this stage
@param kll_expression: KLL expression to tokenize
@param lspec: Regex tokenization spec for the left parameter
@param rspec: Regex tokenization spec for the right parameter
@return False if a LexerError was detected
'''
# Build tokenizers for lparam and rparam
ltokenizer = make_tokenizer(lspec)
rtokenizer = make_tokenizer(rspec)
# Tokenize lparam and rparam
# Ignore the generators, not useful in this case (i.e. use list())
err_pos = [] # Error positions
try:
kll_expression.lparam_sub_tokens = list(ltokenizer(kll_expression.lparam_token.value))
for token in kll_expression.lparam_sub_tokens:
token.locale = kll_expression.context.hid_mapping
except LexerError as err:
# Determine place in constructed expression
err_pos.append(err.place[1])
print(type(err).__name__, err)
try:
kll_expression.rparam_sub_tokens = list(rtokenizer(kll_expression.rparam_token.value))
for token in kll_expression.rparam_sub_tokens:
token.locale = kll_expression.context.hid_mapping
except LexerError as err:
# Determine place in constructed expression
err_pos.append(err.place[1] + kll_expression.rparam_start())
print(type(err).__name__, err)
# Display more information if any errors were detected
if len(err_pos) > 0:
print(kll_expression.point_chars(err_pos))
return False
return True
def tokenize_name_association(self, kll_expression):
'''
Tokenize lparam and rparam in name association expressions
<lparam> => <rparam>;
'''
# Define tokenization regex
lspec = [
('Name', (r'[A-Za-z_][A-Za-z_0-9]*', )),
('Space', (r'[ \t]+', )),
]
rspec = [
('Space', (r'[ \t]+', )),
('Parenthesis', (r'\(|\)', )),
('Operator', (r':', )),
('Comma', (r',', )),
('Name', (r'[A-Za-z_][A-Za-z_0-9]*', )),
('Number', (r'-?((0x[0-9a-fA-F]+)|(0|([1-9][0-9]*)))', )),
]
# Tokenize, expression stores the result, status is returned
return self.tokenize_base(kll_expression, lspec, rspec)
def tokenize_data_association(self, kll_expression):
'''
Tokenize lparam and rparam in data association expressions
<lparam> <= <rparam>;
'''
# Define tokenization regex
lspec = [
('Space', (r'[ \t]+', )),
('ScanCode', (r'S((0x[0-9a-fA-F]+)|([0-9]+))', )),
('ScanCodeStart', (r'S\[', )),
('Pixel', (r'P((0x[0-9a-fA-F]+)|([0-9]+))', )),
('PixelStart', (r'P\[', )),
('Animation', (r'A"[^"]+"', )),
('AnimationStart', (r'A\[', )),
('CodeBegin', (r'\[', )),
('CodeEnd', (r'\]', )),
('Position', (r'r?[xyz]:-?[0-9]+(\.[0-9]+)?', )),
('Comma', (r',', )),
('Number', (r'(0x[0-9a-fA-F]+)|(0|([1-9][0-9]*))', )),
('Dash', (r'-', )),
('Name', (r'[A-Za-z_][A-Za-z_0-9]*', )),
]
rspec = [
('Space', (r'[ \t]+', )),
('Pixel', (r'P((0x[0-9a-fA-F]+)|([0-9]+))', )),
('PixelStart', (r'P\[', )),
('PixelLayer', (r'PL((0x[0-9a-fA-F]+)|([0-9]+))', )),
('PixelLayerStart', (r'PL\[', )),
('Animation', (r'A"[^"]+"', )),
('AnimationStart', (r'A\[', )),
('USBCode', (r'U(("[^"]+")|(0x[0-9a-fA-F]+)|([0-9]+))', )),
('USBCodeStart', (r'U\[', )),
('ScanCode', (r'S((0x[0-9a-fA-F]+)|([0-9]+))', )),
('ScanCodeStart', (r'S\[', )),
('CodeBegin', (r'\[', )),
('CodeEnd', (r'\]', )),
('Position', (r'r?[xyz]:-?[0-9]+(\.[0-9]+)?', )),
('PixelOperator', (r'(\+:|-:|>>|<<)', )),
('RelCROperator', (r'[cr]:i[+-]?', )),
('ColRowOperator', (r'[cr]:', )),
('String', (r'"[^"]*"', )),
('Operator', (r':', )),
('Comma', (r',', )),
('Parenthesis', (r'\(|\)', )),
('Percent', (r'-?(0|([1-9][0-9]*))%', )),
('Number', (r'((0x[0-9a-fA-F]+)|(0|([1-9][0-9]*)))', )),
('Dash', (r'-', )),
('Plus', (r'\+', )),
('Name', (r'[A-Za-z_][A-Za-z_0-9]*', )),
]
# Tokenize, expression stores the result, status is returned
return self.tokenize_base(kll_expression, lspec, rspec)
def tokenize_assignment(self, kll_expression):
'''
Tokenize lparam and rparam in assignment expressions
<lparam> = <rparam>;
'''
# Define tokenization regex
lspec = [
('Space', (r'[ \t]+', )),
('Number', (r'(0x[0-9a-fA-F]+)|(0|([1-9][0-9]*))', )),
('Name', (r'[A-Za-z_][A-Za-z_0-9]*', )),
('CodeBegin', (r'\[', )),
('CodeEnd', (r'\]', )),
]
rspec = [
('Space', (r'[ \t]+', )),
('String', (r'"[^"]*"', )),
('SequenceString', (r"'[^']*'", )),
('Number', (r'(0x[0-9a-fA-F]+)|(0|([1-9][0-9]*))', )),
('Name', (r'[A-Za-z_][A-Za-z_0-9]*', )),
('VariableContents', (r'''[^"' ;:=>()]+''', )),
]
# Tokenize, expression stores the result, status is returned
return self.tokenize_base(kll_expression, lspec, rspec)
def tokenize_mapping(self, kll_expression):
'''
Tokenize lparam and rparam in mapping expressions
<trigger> : <result>; # Set mapping
<trigger> :+ <result>; # Mapping append
<trigger> :- <result>; # Mapping removal
<trigger> :: <result>; # Replace mapping (does nothing if nothing to replace)
Isolated versions of mappings
When expressions are evaluated at runtime, any non-isolated mapping expressions are cancelled
<trigger> i: <result>;
<trigger> i:+ <result>;
<trigger> i:- <result>;
<trigger> i:: <result>;
'''
# Define tokenization regex
lspec = [
('Space', (r'[ \t]+', )),
('USBCode', (r'U(("[^"]+")|(0x[0-9a-fA-F]+)|([0-9]+))', )),
('USBCodeStart', (r'U\[', )),
('ConsCode', (r'CONS(("[^"]+")|(0x[0-9a-fA-F]+)|([0-9]+))', )),
('ConsCodeStart', (r'CONS\[', )),
('SysCode', (r'SYS(("[^"]+")|(0x[0-9a-fA-F]+)|([0-9]+))', )),
('SysCodeStart', (r'SYS\[', )),
('ScanCode', (r'S((0x[0-9a-fA-F]+)|([0-9]+))', )),
('ScanCodeStart', (r'S\[', )),
('IndCode', (r'I(("[^"]+")|(0x[0-9a-fA-F]+)|([0-9]+))', )),
('IndicatorStart', (r'I\[', )),
('Pixel', (r'P((0x[0-9a-fA-F]+)|([0-9]+))', )),
('PixelStart', (r'P\[', )),
('Animation', (r'A"[^"]+"', )),
('AnimationStart', (r'A\[', )),
('LayerStart', (r'Layer(|Shift|Latch|Lock)\[', )),
('TriggerStart', (r'T\[', )),
('CodeBegin', (r'\[', )),
('CodeEnd', (r'\]', )),
('String', (r'"[^"]*"', )),
('SequenceStringL', (r"'[^']*'", )),
('Operator', (r':', )),
('Comma', (r',', )),
('Plus', (r'\+', )),
('Parenthesis', (r'\(|\)', )),
('Timing', (r'[0-9]+(\.[0-9]+)?((s)|(ms)|(us)|(ns))', )),
('Number', (r'(0x[0-9a-fA-F]+)|(0|([1-9][0-9]*))', )),
('Dash', (r'-', )),
('Name', (r'[A-Za-z_][A-Za-z_0-9]*', )),
]
rspec = [
('Space', (r'[ \t]+', )),
('USBCode', (r'U(("[^"]+")|(0x[0-9a-fA-F]+)|([0-9]+))', )),
('USBCodeStart', (r'U\[', )),
('ConsCode', (r'CONS(("[^"]+")|(0x[0-9a-fA-F]+)|([0-9]+))', )),
('ConsCodeStart', (r'CONS\[', )),
('SysCode', (r'SYS(("[^"]+")|(0x[0-9a-fA-F]+)|([0-9]+))', )),
('SysCodeStart', (r'SYS\[', )),
('ScanCode', (r'S((0x[0-9a-fA-F]+)|([0-9]+))', )),
('ScanCodeStart', (r'S\[', )),
('Pixel', (r'P((0x[0-9a-fA-F]+)|([0-9]+))', )),
('PixelStart', (r'P\[', )),
('PixelLayer', (r'PL((0x[0-9a-fA-F]+)|([0-9]+))', )),
('PixelLayerStart', (r'PL\[', )),
('Animation', (r'A"[^"]+"', )),
('AnimationStart', (r'A\[', )),
('LayerStart', (r'Layer(|Shift|Latch|Lock)\[', )),
('CodeBegin', (r'\[', )),
('CodeEnd', (r'\]', )),
('String', (r'"[^"]*"', )),
('SequenceStringR', (r"'[^']*'", )),
('None', (r'None', )),
('Operator', (r':', )),
('Comma', (r',', )),
('Plus', (r'\+', )),
('Parenthesis', (r'\(|\)', )),
('Timing', (r'[0-9]+(\.[0-9]+)?((s)|(ms)|(us)|(ns))', )),
('Number', (r'((0x[0-9a-fA-F]+)|(0|([1-9][0-9]*)))', )),
('Dash', (r'-', )),
('Name', (r'[A-Za-z_][A-Za-z_0-9]*', )),
]
# Tokenize, expression stores the result, status is returned
return self.tokenize_base(kll_expression, lspec, rspec)
## Parsers ##
def parse_base(self, kll_expression, parse_expression, quiet):
'''
Base parsing logic
@param kll_expression: Expression being parsed, contains tokens
@param parse_expression: Parse tree expression that understands the group of tokens
@param quiet: Reduces verbosity, used when re-running an errored command in debug mode
@return: False if parsing wasn't successful
'''
ret = True
try:
# Since the expressions have already been pre-organized, we only expect a single expression at a time
ret = parse_expression.parse(kll_expression.final_tokens())
# Parse interpretation error, more info is provided by the specific parse interpreter
if not ret and not quiet:
print(kll_expression.final_tokens())
except NoParseError as err:
if not quiet:
print(kll_expression.final_tokens())
print("\033[1;33m{0}\033[0m".format(err))
ret = False
raise
return ret
def parse_name_association(self, kll_expression, quiet=False):
'''
Parse name association expressions
<lparam> => <rparam>;
'''
# Import parse elements/lambda functions
from kll.common.parse import (
comma,
name,
number,
operator,
parenthesis,
unarg,
Make,
)
# Name Association
# <name> => <function>( <arg> : <number>, ... );
capability_arguments = name + skip(operator(':')) + number + skip(maybe(comma)) >> unarg(Make.capArg)
capability_expression = name + skip(operator('=>')) + name + skip(parenthesis('(')) + many(capability_arguments) + skip(parenthesis(')')) >> unarg(kll_expression.capability)
# Name Association
# <define name> => <definition>;
define_expression = name + skip(operator('=>')) + name >> unarg(kll_expression.define)
# Top-level Parser
expr = (
capability_expression |
define_expression
)
return self.parse_base(kll_expression, expr, quiet)
def parse_data_association(self, kll_expression, quiet=False):
'''
Parse data association expressions
<lparam> <= <rparam>;
'''
from kll.common.parse import (
animation_def,
animation_elem,
animation_flattened,
animation_modlist,
comma,
flatten,
operator,
pixel_elem,
pixel_expanded,
pixelmod_elem,
position_list,
triggerCode_outerList,
unarg,
)
# Data Association
# <animation> <= <animation modifiers>;
# <animation frame> <= <pixel modifiers>;
animation_expression = (animation_elem | animation_def) + skip(operator('<=')) + animation_modlist >> unarg(kll_expression.animation)
animationFrame_expression = animation_flattened + skip(operator('<=')) + oneplus(pixelmod_elem + skip(maybe(comma))) >> unarg(kll_expression.animationFrame)
# Data Association
# <pixel> <= <position>;
pixelPosition_expression = (pixel_expanded | pixel_elem) + skip(operator('<=')) + position_list >> unarg(kll_expression.pixelPosition)
# Data Association
# <scancode> <= <position>;
scanCodePosition_expression = (triggerCode_outerList >> flatten >> flatten) + skip(operator('<=')) + position_list >> unarg(kll_expression.scanCodePosition)
# Top-level Parser
expr = (
animation_expression |
animationFrame_expression |
pixelPosition_expression |
scanCodePosition_expression
)
return self.parse_base(kll_expression, expr, quiet)
def parse_assignment(self, kll_expression, quiet=False):
'''
Parse assignment expressions
<lparam> = <rparam>;
'''
# Import parse elements/lambda functions
from kll.common.parse import (
code_begin,
code_end,
comma,
content,
dash,
name,
number,
operator,
string,
unarg,
unseqString,
)
# Assignment
# <variable> = <contents>;
variable_contents = name | content | string | number | comma | dash | unseqString
variable_expression = name + skip(operator('=')) + oneplus(variable_contents) >> unarg(kll_expression.variable)
# Array Assignment
# <variable>[] = <contents>;
# <variable>[<index>] = <contents>;
array_expression = name + skip(code_begin) + maybe(number) + skip(code_end) + skip(operator('=')) + oneplus(variable_contents) >> unarg(kll_expression.array)
# Top-level Parser
expr = (
array_expression |
variable_expression
)
return self.parse_base(kll_expression, expr, quiet)
def parse_mapping(self, kll_expression, quiet=False):
'''
Parse mapping expressions
<trigger> : <result>; # Set mapping
<trigger> :+ <result>; # Mapping append
<trigger> :- <result>; # Mapping removal
<trigger> :: <result>; # Replace mapping (does nothing if nothing to replace)
Isolated versions of mappings
When expressions are evaluated at runtime, any non-isolated mapping expressions are cancelled
<trigger> i: <result>;
<trigger> i:+ <result>;
<trigger> i:- <result>;
<trigger> i:: <result>;
'''
# Import parse elements/lambda functions
from kll.common.parse import (
none,
operator,
pixelchan_elem,
resultCode_outerList,
scanCode_single,
triggerCode_outerList,
unarg,
)
# Mapping
# <trigger> : <result>;
operatorTriggerResult = operator(':') | operator(':+') | operator(':-') | operator('::') | operator('i:') | operator('i:+') | operator('i:-') | operator('i::')
triggerCode_expression = triggerCode_outerList + operatorTriggerResult + resultCode_outerList >> unarg(kll_expression.triggerCode)
# Data Association
# <pixel channel> : <scancode or None>;
pixelChan_expression = pixelchan_elem + skip(operator(':')) + (scanCode_single | none) >> unarg(kll_expression.pixelChannels)
# Top-level Parser
expr = (
triggerCode_expression |
pixelChan_expression
)
return self.parse_base(kll_expression, expr, quiet)
## Processing ##
def tokenize(self, kll_context):
'''
Tokenizes contents of both LOperatorData and ROperatorData
LOperatorData and ROperatorData have different contexts, so tokenization can be simplified a bit
@param kll_context: KLL Context containing file data
'''
ret = True
# Tokenizer map, each takes an expression argument
tokenizers = {
# Name association
'=>': self.tokenize_name_association,
# Data association
'<=': self.tokenize_data_association,
# Assignment
'=': self.tokenize_assignment,
# Mapping
# All : based operators have the same structure
# The only difference is the application context (handled in a later stage)
':': self.tokenize_mapping,
}
# Tokenize left and right parameters of the expression
for kll_expression in kll_context.expressions:
# Determine which parser to use
token = kll_expression.operator_type()
# If there was a problem tokenizing, display expression info
if not tokenizers[token](kll_expression):
ret = False
print("{0} {1}:tokenize -> {2}:{3}".format(
ERROR,
self.__class__.__name__,
kll_context.parent.path,
kll_expression.lparam_token.start[0],
))
# Debug Output
# Displays each parsed expression on a single line
# Includes <file>:<line>:<expression class> followed by the classified tokens
if self.token_debug:
# Uncolorize if requested
output = "\033[1m{0}\033[0m:\033[1;33m{1}\033[0m:\033[1;32m{2}\033[0m\033[1;36;41m>\033[0m {3}".format(
os.path.basename(kll_context.parent.path),
kll_expression.lparam_token.start[0],
kll_expression.__class__.__name__,
kll_expression.final_tokens(),
)
print(self.color and output or ansi_escape.sub('', output))
return ret
def parse(self, kll_context):
'''
Parse the fully tokenized expressions
@param kll_context: KLL Context which has the fully tokenized expression list
'''
ret = True
# Parser map of functions, each takes an expression argument
parsers = {
# Name association
'=>': self.parse_name_association,
# Data association
'<=': self.parse_data_association,
# Assignment
'=': self.parse_assignment,
# Mapping
# All : based operators have the same structure
# The only difference is the application context (handled in a later stage)
':': self.parse_mapping,
}
# Parse each expression to extract the data from it
for kll_expression in kll_context.expressions:
token = kll_expression.operator_type()
# Assume failed, unless proven otherwise
cur_ret = False
# In some situations we don't want a parser trace, but only disable when we know
parser_debug_ignore = False
# If there was a problem parsing, display expression info
# Catch any TypeErrors due to incorrect parsing rules
try:
cur_ret = parsers[token](kll_expression)
# Unexpected token (user grammar error), sometimes might be a bug
except NoParseError as err:
import traceback
traceback.print_tb(err.__traceback__)
print(type(err).__name__, err)
print("\033[1mBad kll expression, usually a syntax error.\033[0m")
cur_ret = False
# Invalid parsing rules, definitely a bug
except TypeError as err:
import traceback
traceback.print_tb(err.__traceback__)
print(type(err).__name__, err)
print("\033[1mBad parsing rule, this is a bug!\033[0m")
# Lookup error, invalid lookup
except KeyError as err:
import traceback
print("".join(traceback.format_tb(err.__traceback__)[-1:]), end='')
print("\033[1mInvalid dictionary lookup, check syntax.\033[0m")
parser_debug_ignore = True
# Parsing failed, show more error info
if not cur_ret:
ret = False
# We don't always want a full trace of the parser
if not parser_debug_ignore:
# StringIO stream from funcparserlib parser.py
# Command failed, run again, this time with verbose logging enabled
# Helps debug erroneous parsing expressions
parser_log = io.StringIO()
# This part is not thread-safe
# You must run with --jobs 1 to get 100% valid output
Parser_debug(True, parser_log)
try:
parsers[token](kll_expression, True)
except BaseException:
pass
Parser_debug(False)
# Display
print(parser_log.getvalue())
# Cleanup StringIO
parser_log.close()
print("{0} {1}:parse -> {2}:{3}".format(
ERROR,
self.__class__.__name__,
kll_context.parent.path,
kll_expression.lparam_token.start[0],
))
# Debug Output
# Displays each parsed expression on a single line
# Includes <file>:<line>:<expression class>:<expression type> followed by the expression
if self.parser_debug:
# Uncolorize if requested
output = "\033[1m{0}\033[0m:\033[1;33m{1}\033[0m:\033[1;32m{2}\033[0m:\033[1;35m{3}\033[1;36;41m>\033[0m {4}".format(
os.path.basename(kll_context.parent.path),
kll_expression.lparam_token.start[0],
kll_expression.__class__.__name__,
kll_expression.type,
kll_expression
)
print(self.color and output or ansi_escape.sub('', output))
if self.parser_token_debug:
# Uncolorize if requested
output = "\t\033[1;4mTokens\033[0m\033[1;36m:\033[0m {0}".format(
[(t.type, t.value) for t in kll_expression.final_tokens()]
)
print(self.color and output or ansi_escape.sub('', output))
return ret
def process(self):
'''
Operation Specifics Stage Processing
'''
self._status = 'Running'
# Determine colorization setting
self.color = self.control.stage('CompilerConfigurationStage').color
# Acquire thread pool
pool = self.control.stage('CompilerConfigurationStage').pool
# Get list of KLL contexts
contexts = self.control.stage('OperationClassificationStage').contexts
# Tokenize operators
if False in pool.map(self.tokenize, contexts):
self._status = 'Incomplete'
return
# Parse operators
if False in pool.map(self.parse, contexts):
self._status = 'Incomplete'
return
self._status = 'Completed'
class OperationOrganizationStage(Stage):
'''
Operation Organization Stage
* Using the type of each operation, apply the KLL Context to each operation
* This results in various datastructures being populated based upon the context and type of operation
* Each Context instance (distinct Context of the same type), remain separate
'''
def __init__(self, control):
'''
Initialize configuration variables
'''
super().__init__(control)
self.operation_organization_debug = False
self.operation_organization_display = False
def command_line_args(self, args):
'''
Group parser for command line arguments
@param args: Name space of processed arguments
'''
self.operation_organization_debug = args.operation_organization_debug
self.operation_organization_display = args.operation_organization_display
def command_line_flags(self, parser):
'''
Group parser for command line options
@param parser: argparse setup object
'''
# Create new option group
group = parser.add_argument_group('\033[1mOperation Organization Configuration\033[0m')
# Optional Arguments
group.add_argument(
'--operation-organization-debug',
action='store_true',
default=self.operation_organization_debug,
help="Enable operation organization debug output.\n",
)
group.add_argument(
'--operation-organization-display',
action='store_true',
default=self.operation_organization_display,
help="Show datastructure of each context after filling.\n",
)
def organize(self, kll_context):
'''
Organize each set of expressions on a context level
The full layout organization occurs over multiple stages, this is the first one
'''
# Add each of the expressions to the organization data structure
try:
for kll_expression in kll_context.expressions:
# Debug output
if self.operation_organization_debug:
# Uncolorize if requested
output = "\033[1m{0}\033[0m:\033[1;33m{1}\033[0m:\033[1;32m{2}\033[0m:\033[1;35m{3}\033[1;36;41m>\033[0m {4}".format(
os.path.basename(kll_context.parent.path),
kll_expression.lparam_token.start[0],
kll_expression.__class__.__name__,
kll_expression.type,
kll_expression
)
print(self.color and output or ansi_escape.sub('', output))
# Set connect_id for expression
kll_expression.connect_id = kll_context.connect_id
# Add expression
kll_context.organization.add_expression(
kll_expression,
(self.operation_organization_debug, self.color)
)
except Exception as err:
import traceback
traceback.print_tb(err.__traceback__)
print(type(err).__name__, err)
print("Could not add/modify kll expression in context datastructure.")
return False
return True
def process(self):
'''
Operation Organization Stage Processing
'''
self._status = 'Running'
# Determine colorization setting
self.color = self.control.stage('CompilerConfigurationStage').color
# Acquire thread pool
pool = self.control.stage('CompilerConfigurationStage').pool
# Get list of KLL contexts
contexts = self.control.stage('OperationClassificationStage').contexts
# Add expressions from contexts to context datastructures
if False in pool.map(self.organize, contexts):
self._status = 'Incomplete'
return
# Show result of filling datastructure
if self.operation_organization_display:
for kll_context in contexts:
# Uncolorize if requested
output = "\033[1m{0}\033[0m:\033[1;33m{1}\033[0m".format(
os.path.basename(kll_context.parent.path),
kll_context.__class__.__name__
)
print(self.color and output or ansi_escape.sub('', output))
# Display Table
for store in kll_context.organization.stores():
# Uncolorize if requested
output = "\t\033[1;4;32m{0}\033[0m".format(
store.__class__.__name__
)
print(self.color and output or ansi_escape.sub('', output))
print(self.color and store or ansi_escape.sub('', store), end="")
self._status = 'Completed'
class DataOrganizationStage(Stage):
'''
Data Organization Stage
* Using the constructed Context datastructures, merge contexts of the same type together
* Precedence/priority is defined by the order each Context was included on the command line
* May include datastructure data optimizations
'''
def __init__(self, control):
'''
Initialize configuration variables
'''
super().__init__(control)
self.data_organization_debug = False
self.data_organization_display = False
self.contexts = None
def command_line_args(self, args):
'''
Group parser for command line arguments
@param args: Name space of processed arguments
'''
self.data_organization_debug = args.data_organization_debug
self.data_organization_display = args.data_organization_display
def command_line_flags(self, parser):
'''
Group parser for command line options
@param parser: argparse setup object
'''
# Create new option group
group = parser.add_argument_group('\033[1mData Organization Configuration\033[0m')
# Optional Arguments
group.add_argument(
'--data-organization-debug',
action='store_true',
default=self.data_organization_debug,
help="Show debug info from data organization stage.\n",
)
group.add_argument(
'--data-organization-display',
action='store_true',
default=self.data_organization_display,
help="Show datastructure of each context after merging.\n",
)
def sort_contexts(self, contexts):
'''
Returns a dictionary of list of sorted 'like' contexts
This is used to group the contexts that need merging
'''
lists = {}
for kll_context in contexts:
name = kll_context.__class__.__name__
# PartialMapContexts are sorted by name *and* layer number
if name == "PartialMapContext":
name = "{0}{1}".format(name, kll_context.layer)
# Add new list if no elements yet
if name not in lists.keys():
lists[name] = [kll_context]
else:
lists[name].append(kll_context)
if self.data_organization_debug:
output = "\033[1mContext Organization\033[0m"
for key, val in sorted(lists.items()):
output += "\n{}".format(key)
for elem in val:
output += "\n\t{} - {}".format(elem.layer_info(), elem.kll_files)
print(self.color and output or ansi_escape.sub('', output))
return lists
def organize(self, kll_context):
'''
Symbolically merge all like Contexts
The full layout organization occurs over multiple stages, this is the second stage
'''
# Lookup context name
context_name = "{0}".format(kll_context[0].__class__.__name__)
# PartialMapContexts are sorted by name *and* layer number
if context_name == "PartialMapContext":
context_name = "{0}{1}".format(context_name, kll_context[0].layer)
# Initialize merge context as the first one
self.contexts[context_name] = context.MergeContext(kll_context[0])
# Indicate when a context is skipped as there is only one
if self.data_organization_debug:
if len(kll_context) < 2:
output = "\033[1;33mSkipping\033[0m\033[1m:\033[1;32m{0}\033[0m".format(
context_name
)
print(self.color and output or ansi_escape.sub('', output))
return True
# The incoming list is ordered
# Merge in each of the contexts symbolically
for next_context in kll_context[1:]:
try:
if self.data_organization_debug:
output = "\033[1m=== Merging ===\033[0m {1} into {0}".format(self.contexts[context_name].kll_files, next_context.kll_files)
print(self.color and output or ansi_escape.sub('', output))
self.contexts[context_name].merge(
next_context,
context_name,
(self.data_organization_debug, self.color)
)
except Exception as err:
import traceback
traceback.print_tb(err.__traceback__)
print(type(err).__name__, err)
print("Could not merge '{0}' into '{1}' context.".format(
os.path.basename(next_context.parent.path),
context_name
))
return False
# After merging contexts, update Context information.
# If a BaseMap, apply modifier to each of the expressions.
# This is used by emitter to decide whether the expression can be filtered out.
if context_name == 'BaseMapContext':
for key, expr in self.contexts['BaseMapContext'].organization.mapping_data.data.items():
# Only applies to MapExpressions
if isinstance(expr[0], expression.MapExpression):
expr[0].base_map = True
return True
def process(self):
'''
Data Organization Stage Processing
'''
self._status = 'Running'
# Determine colorization setting
self.color = self.control.stage('CompilerConfigurationStage').color
# Acquire thread pool
pool = self.control.stage('CompilerConfigurationStage').pool
# Get list of KLL contexts
contexts = self.control.stage('OperationClassificationStage').contexts
# Get sorted list of KLL contexts
sorted_contexts = self.sort_contexts(contexts)
self.contexts = {}
# Add expressions from contexts to context datastructures
if False in pool.map(self.organize, sorted_contexts.values()):
self._status = 'Incomplete'
return
# Show result of filling datastructure
if self.data_organization_display:
for key, kll_context in self.contexts.items():
# Uncolorize if requested
output = "\033[1;33m{0}\033[0m:\033[1m{1}\033[0m".format(
key,
kll_context.paths(),
)
print(self.color and output or ansi_escape.sub('', output))
# Display Table
for store in kll_context.organization.stores():
# Uncolorize if requested
output = "\t\033[1;4;32m{0}\033[0m".format(
store.__class__.__name__
)
print(self.color and output or ansi_escape.sub('', output))
print(self.color and store or ansi_escape.sub('', store), end="")
self._status = 'Completed'
class DataFinalizationStage(Stage):
'''
Data Finalization Stage
* Using the merged Context datastructures, apply the Configuration and BaseMap contexts to the higher
level DefaultMap and PartialMap Contexts
* First BaseMap is applied on top of Configuration
* Next, DefaultMap is applied on top of (Configuration+BaseMap) as well as the PartialMaps
* May include datastructure data optimizations
'''
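# Sketch of the merge order performed in process() below:
#   base_context        = Configuration (+ Generic) + BaseMap
#   default_context     = base_context + DefaultMap           -> layer_contexts[0]
#   partial_contexts[n] = base_context + PartialMapContext(n) -> layer_contexts[n + 1]
#   full_context        = default_context + every PartialMapContext (used for variable lookups)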
def __init__(self, control):
'''
Initialize configuration variables
'''
super().__init__(control)
self.data_finalization_debug = False
self.data_finalization_display = False
self.base_context = None
self.default_context = None
self.partial_contexts = None
self.full_context = None
self.context_list = None
self.layer_contexts = None
def command_line_args(self, args):
'''
Group parser for command line arguments
@param args: Name space of processed arguments
'''
self.data_finalization_debug = args.data_finalization_debug
self.data_finalization_display = args.data_finalization_display
def command_line_flags(self, parser):
'''
Group parser for command line options
@param parser: argparse setup object
'''
# Create new option group
group = parser.add_argument_group('\033[1mData Finalization Configuration\033[0m')
# Optional Arguments
group.add_argument(
'--data-finalization-debug',
action='store_true',
default=self.data_finalization_debug,
help="Show debug info from data finalization stage.\n",
)
group.add_argument(
'--data-finalization-display',
action='store_true',
default=self.data_finalization_display,
help="Show datastructure of each context after merging.\n",
)
def process(self):
'''
Data Finalization Stage Processing
'''
self._status = 'Running'
# Determine colorization setting
self.color = self.control.stage('CompilerConfigurationStage').color
# Get context silos
contexts = self.control.stage('DataOrganizationStage').contexts
self._status = 'Incomplete'
# Context list
self.context_list = []
# Depending on the calling order, we may need to use a GenericContext or ConfigurationContext as the base
# Default to ConfigurationContext first
if 'ConfigurationContext' in contexts.keys():
self.base_context = context.MergeContext(contexts['ConfigurationContext'])
# If we still have GenericContexts around, merge them on top of the ConfigurationContext
if 'GenericContext' in contexts.keys():
self.base_context.merge(
contexts['GenericContext'],
'GenericContext',
(self.data_finalization_debug, self.color)
)
# Otherwise, just use a GenericContext
elif 'GenericContext' in contexts.keys():
self.base_context = context.MergeContext(contexts['GenericContext'])
# Fail otherwise, you *must* have a GenericContext or ConfigurationContext
else:
print("{0} Missing a 'GenericContext' and/or 'ConfigurationContext'.".format(ERROR))
self._status = 'Incomplete'
return
# Next use the BaseMapContext and overlay on ConfigurationContext
# This serves as the basis for the next two merges
if 'BaseMapContext' in contexts.keys():
self.base_context.merge(
contexts['BaseMapContext'],
'BaseMapContext',
(self.data_finalization_debug, self.color)
)
self.context_list.append(('BaseMapContext', self.base_context))
# Then use the DefaultMapContext as the default keyboard mapping
self.default_context = context.MergeContext(self.base_context)
if 'DefaultMapContext' in contexts.keys():
self.default_context.merge(
contexts['DefaultMapContext'],
'DefaultMapContext',
(self.data_finalization_debug, self.color)
)
self.context_list.append(('DefaultMapContext', self.default_context))
# For convenience build a fully merged dataset
# This is usually only required for variables
self.full_context = context.MergeContext(self.default_context)
# Finally setup each of the PartialMapContext groups
# Build list of PartialMapContexts and sort by layer before iterating over
self.partial_contexts = []
partial_context_list = [
(item[1].layer, item[1])
for item in contexts.items()
if 'PartialMapContext' in item[0]
]
for layer, partial in sorted(partial_context_list, key=lambda x: x[0]):
# Start with base context
self.partial_contexts.append(context.MergeContext(self.base_context))
# Merge in layer
self.partial_contexts[layer].merge(
partial,
'PartialMapContext',
(self.data_finalization_debug, self.color)
)
# Add to context list
self.context_list.append(('PartialMapContext{0}'.format(layer), self.default_context))
# Add each partial to the full_context as well
self.full_context.merge(
partial,
'PartialMapContext',
(self.data_finalization_debug, self.color)
)
# Build layer context list
# Each index of the list corresponds to the keyboard layer
self.layer_contexts = [self.default_context]
self.layer_contexts.extend(self.partial_contexts)
# Show result of filling datastructure
if self.data_finalization_display:
for key, kll_context in self.context_list:
# Uncolorize if requested
output = "*\033[1;33m{0}\033[0m:\033[1m{1}\033[0m".format(
key,
kll_context.paths(),
)
print(self.color and output or ansi_escape.sub('', output))
# Display Table
for store in kll_context.organization.stores():
# Uncolorize if requested
output = "\t\033[1;4;32m{0}\033[0m".format(
store.__class__.__name__
)
print(self.color and output or ansi_escape.sub('', output))
print(self.color and store or ansi_escape.sub('', store), end="")
self._status = 'Completed'
class DataAnalysisStage(Stage):
'''
Data Analysis Stage
* Using the completed Context datastructures, do additional analysis that may be required for Code Generation
'''
def __init__(self, control):
'''
Initialize configuration variables
'''
super().__init__(control)
self.data_analysis_debug = False
self.data_analysis_display = False
self.trigger_index = []
self.result_index = []
self.trigger_index_lookup = dict()
self.trigger_index_reduced_lookup = dict()
self.result_index_lookup = dict()
self.result_index = []
self.trigger_lists = []
# NOTE Interconnect offsets are determined in the preprocessor stage
self.max_scan_code = []
self.min_scan_code = []
self.rotation_map = dict()
self.interconnect_scancode_offsets = []
self.interconnect_pixel_offsets = []
self.scancode_positions = dict()
self.pixel_positions = dict()
self.pixel_display_mapping = []
self.pixel_display_params = dict()
self.animation_settings = dict()
self.animation_settings_orig = dict()
self.animation_settings_list = []
self.animation_uid_lookup = dict()
self.partial_contexts = None
self.layer_contexts = None
self.full_context = None
def command_line_args(self, args):
'''
Group parser for command line arguments
@param args: Name space of processed arguments
'''
self.data_analysis_debug = args.data_analysis_debug
self.data_analysis_display = args.data_analysis_display
def command_line_flags(self, parser):
'''
Group parser for command line options
@param parser: argparse setup object
'''
# Create new option group
group = parser.add_argument_group('\033[1mData Analysis Configuration\033[0m')
# Optional Arguments
group.add_argument(
'--data-analysis-debug',
action='store_true',
default=self.data_analysis_debug,
help="Show debug info from data analysis stage.\n",
)
group.add_argument(
'--data-analysis-display',
action='store_true',
default=self.data_analysis_display,
help="Show results of data analysis.\n",
)
def reduction(self):
'''
Builds a new reduced_contexts list
For each of the layers, evaluate triggers into ScanCodes (USBCode to ScanCodes)
(all other triggers don't require reductions)
'''
self.reduced_contexts = []
if self.data_analysis_debug:
print("\033[1m--- Analysis Reduction ---\033[0m")
for index, layer in enumerate(self.layer_contexts):
if self.data_analysis_debug:
print("\033[1m ++ Layer {0} ++\033[0m".format(index))
reduced = context.MergeContext(layer)
reduced.reduction(debug=self.data_analysis_debug)
self.reduced_contexts.append(reduced)
# Filter out BaseMap specific expressions
if self.data_analysis_debug:
print("\033[1m == BaseMap Cleanup ==\033[0m")
# Skip DefaultLayer (0) as there is nothing to cleanup
for index, layer in enumerate(self.reduced_contexts[1:]):
if self.data_analysis_debug:
print("\033[1m ++ Layer {0} ++\033[0m".format(index + 1))
layer.cleanup((self.data_analysis_debug, self.color))
if self.data_analysis_debug:
print(layer.organization.mapping_data)
def generate_mapping_indices(self):
'''
For each trigger:result pair generate a unique index
The triggers and results are first sorted alphabetically
'''
if self.data_analysis_debug or self.data_analysis_display:
print("\033[1m--- Mapping Indices ---\033[0m")
# Build unique dictionary of map expressions
# Only reduce true duplicates (identical kll expressions) initially
expressions = dict()
# Gather list of expressions
for index, layer in enumerate(self.reduced_contexts):
# Set initial min/max ScanCode
self.min_scan_code.append(0xFFFF)
self.max_scan_code.append(0)
# Add each expression in layer to overall dictionary lookup for command reduction
for key, elem in layer.organization.mapping_data.data.items():
# Add each of the expressions (usually just one)
# Before adding the expression, adjust the scancode using the connect_id offset
for sub_expr in elem:
scancode_offset = self.interconnect_scancode_offsets[sub_expr.connect_id]
sub_expr.add_trigger_uid_offset(scancode_offset)
expressions[sub_expr.kllify()] = sub_expr
# We only need to use the first expression, as the triggers are all the same
# Determine min ScanCode of each trigger expression
min_uid = elem[0].min_trigger_uid()
if min_uid < self.min_scan_code[index]:
self.min_scan_code[index] = min_uid
# Determine max ScanCode of each trigger expression
max_uid = elem[0].max_trigger_uid()
if max_uid > self.max_scan_code[index]:
self.max_scan_code[index] = max_uid
# Unset min_scan_code if not set
if self.min_scan_code[index] == 0xFFFF and self.max_scan_code[index] == 0:
self.min_scan_code[index] = 0
# Sort expressions by trigger and result; there may be *duplicate* triggers, but don't reduce yet
# as we still need the trigger->result and result->trigger mappings
trigger_sorted = dict()
trigger_sorted_reduced = dict()
result_sorted = dict()
for key, elem in expressions.items():
# Trigger Sorting (we don't use trigger_str() here as it would cause reduction)
trig_key = key
if trig_key not in trigger_sorted.keys():
trigger_sorted[trig_key] = [elem]
else:
trigger_sorted[trig_key].append(elem)
# Trigger Sorting, reduced dictionary for trigger guides (i.e. we want reduction)
trig_key = elem.trigger_str()
if trig_key not in trigger_sorted_reduced.keys():
trigger_sorted_reduced[trig_key] = [elem]
else:
trigger_sorted_reduced[trig_key].append(elem)
# Result Sorting
res_key = elem.result_str()
if res_key not in result_sorted.keys():
result_sorted[res_key] = [elem]
else:
result_sorted[res_key].append(elem)
# Debug info
if self.data_analysis_debug or self.data_analysis_display:
print("\033[1mMin ScanCode\033[0m: {0}".format(self.min_scan_code))
print("\033[1mMax ScanCode\033[0m: {0}".format(self.max_scan_code))
# Build indices
self.trigger_index = [elem for key, elem in sorted(trigger_sorted.items(), key=lambda x: x[1][0].sort_trigger())]
self.trigger_index_reduced = [elem for key, elem in sorted(trigger_sorted_reduced.items(), key=lambda x: x[1][0].sort_trigger())]
self.result_index = [elem for key, elem in sorted(result_sorted.items(), key=lambda x: x[1][0].sort_result())]
# Build index lookup
# trigger_index_lookup has a full lookup so we don't lose information
self.trigger_index_lookup = {name[0].kllify(): index for index, name in enumerate(self.trigger_index)}
self.trigger_index_reduced_lookup = {name[0].sort_trigger(): index for index, name in enumerate(self.trigger_index_reduced)}
self.result_index_lookup = {name[0].sort_result(): index for index, name in enumerate(self.result_index)}
def generate_map_offset_table(self):
'''
Generates list of offsets for each of the interconnect ids
'''
if self.data_analysis_debug or self.data_analysis_display:
print("\033[1m--- Map Offsets ---\033[0m")
print("Scan Code Offsets: {0}".format(self.interconnect_scancode_offsets))
print("Pixel Id Offsets: {0}".format(self.interconnect_pixel_offsets))
# FIXME Should this be removed entirely?
return
print("{0} This functionality is handled by the preprocessor".format(ERROR))
maxscancode = {}
maxpixelid = {}
for index, layer in enumerate(self.reduced_contexts):
# Find the max scancode of each the layers
# A max scancode for each of the interconnect ids found
for key, value in layer.organization.maxscancode().items():
if key not in maxscancode.keys() or maxscancode[key] < value:
maxscancode[key] = value
# Find the max pixel id for each of the interconnect ids found
for key, value in layer.organization.maxpixelid().items():
if key not in maxpixelid.keys() or maxpixelid[key] < value:
maxpixelid[key] = value
# Build scancode list of offsets
self.interconnect_scancode_offsets = []
cumulative = 0
if len(maxscancode.keys()) > 0:
for index in range(max(maxscancode.keys()) + 1):
# Set offset, then add max to cumulative
self.interconnect_scancode_offsets.append(cumulative)
cumulative += maxscancode[index]
# Build pixel id list of offsets
self.interconnect_pixel_offsets = []
cumulative = 0
if len(maxpixelid.keys()) > 0:
for index in range(max(maxpixelid.keys()) + 1):
# Set offset, then add max to cumulative
self.interconnect_pixel_offsets.append(cumulative)
cumulative += maxpixelid[index]
if self.data_analysis_debug:
print("\033[1m--- Map Offsets ---\033[0m")
print("Scan Code Offsets: {0}".format(self.interconnect_scancode_offsets))
print("Pixel Id Offsets: {0}".format(self.interconnect_pixel_offsets))
def generate_trigger_lists(self):
'''
Generate Trigger Lists per layer using the index lists
'''
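# Resulting structure (illustrative, indices are hypothetical):
# trigger_lists[layer][uid] is either None or a list of indices into trigger_index,
# e.g. trigger_lists[0][27] == [4, 7] means uid 27 can initiate trigger macros 4 and 7 on layer 0.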
if self.data_analysis_debug or self.data_analysis_display:
print("\033[1m--- Trigger Lists ---\033[0m")
# Iterate through each of the layers (starting from 0/Default)
# Generate the trigger list for each of the ScanCodes
# A trigger list is a list of all possible trigger macros that may be initiated by a ScanCode
for index, layer in enumerate(self.reduced_contexts):
# Initialize trigger list by max index size
self.trigger_lists.append([None] * (self.max_scan_code[index] + 1))
# Iterate over each expression
for key, elem in layer.organization.mapping_data.data.items():
# Each trigger, may have multiple results
for sub_expr in elem:
# Get list of ids from expression
for identifier in sub_expr.trigger_id_list():
# If animation, set the uid first by doing a uid lookup
if identifier.type in ['Animation']:
identifier.uid = self.animation_uid_lookup[identifier.name]
# Append each uid to Trigger List
if identifier.type in ['Animation', 'IndCode', 'GenericTrigger', 'Layer', 'LayerLock', 'LayerShift', 'LayerLatch', 'ScanCode']:
# In order to uniquely identify each trigger, using full kll expression as lookup
trigger_index = self.trigger_index_lookup[sub_expr.kllify()]
# Initialize trigger list if None
if self.trigger_lists[index][identifier.get_uid()] is None:
self.trigger_lists[index][identifier.get_uid()] = [trigger_index]
# Append to trigger list, only if trigger not already added
elif trigger_index not in self.trigger_lists[index][identifier.get_uid()]:
self.trigger_lists[index][identifier.get_uid()].append(trigger_index)
# Debug output
if self.data_analysis_debug or self.data_analysis_display:
print("\033[1mTrigger List\033[0m: {0} {1}".format(index, self.trigger_lists[index]))
def generate_rotation_ranges(self):
'''
Generate Rotation Ranges
Using the reduced contexts determine the uids of the rotation triggers used.
And calculate the size of the rotation (so KLL knows where the wrap-around occurs)
Currently only used for Generic Trigger 21
T[21,0](0) : <result>;
T[21,0](1) : <result>;
T[21,0](2) : <result>; # uid 0, range 0..2
T[21,3](6) : <result>; # uid 3, range 0..6
We don't need to worry about capabilities firing triggers that don't exist.
Those will be ignored at runtime.
'''
# Iterate over each layer
for layer in self.reduced_contexts:
# Iterate over each expression
for key, elem in layer.organization.mapping_data.data.items():
# Each trigger, may have multiple results
for sub_expr in elem:
# Get list of ids from expression
for identifier in sub_expr.trigger_id_list():
# Determine if GenericTrigger
if identifier.type in ['GenericTrigger'] and identifier.idcode == 21:
# If uid not in rotation_map, add it
if identifier.uid not in self.rotation_map.keys():
self.rotation_map[identifier.uid] = 0
# If there is no parameter raise an error
if len(identifier.parameters) != 1:
self._status = 'Incomplete'
print("{} Rotation trigger must have 1 parameter e.g. T[21,1](3): {}".format(
ERROR,
elem,
))
continue
# Set the maximum rotation value
if identifier.parameters[0].state > self.rotation_map[identifier.uid]:
self.rotation_map[identifier.uid] = identifier.parameters[0].state
def generate_pixel_display_mapping(self):
'''
Generate Pixel Display Mapping
First generate a position dictionary of all pixels using Pixel Index addresses and x,y,z positions
Build a 2-dimensional mapping of all Pixels.
Use UnitSize, ColumnSize and RowSize to determine pixel fit.
* Build list of all pixels in rows/columns
* Find min/max x/y to determine size of grid
* Use UnitSize to determine where to place each Pixel
* Error if we cannot fit all Pixels
'''
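# Worked example (hypothetical values): with UnitSize=19.05, ColumnSize=1, RowSize=1
# and pixels spanning x in [0, 38.1], y in [0, 19.05], the grid built below is
# 2 rows x 3 columns; a pixel at (x=19.05, y=0) gives x_percent = 19.05/38.1 = 0.5,
# so it is placed at column round(0.5 * (3 - 1)) = 1, row 0.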
# Query the scancode and pixel positions
scancode_physical = self.full_context.query('DataAssociationExpression', 'ScanCodePosition')
pixel_physical = self.full_context.query('DataAssociationExpression', 'PixelPosition')
pixel_indices = self.full_context.query('MapExpression', 'PixelChannel')
pixel_indices_filtered = list(filter(lambda x: not isinstance(x.position, list), pixel_indices.data.values()))
physical = scancode_physical.data.copy()
physical.update(pixel_physical.data)
positions = dict()
scancode_positions = dict()
for key, item in physical.items():
entry = dict()
# Acquire each dimension
entry['x'] = item.association[0].x
entry['y'] = item.association[0].y
entry['z'] = item.association[0].z
# Not every pixel has a scancode mapping
scancode_uid = None
# Check each dimension, set to 0 if None
for k in entry.keys():
if entry[k] is None:
entry[k] = 0.0
else:
entry[k] = float(entry[k])
# Query Pixel index
uid = item.association[0].uid
if isinstance(uid, id.PixelAddressId):
uid = uid.index
# Use the ScanCode to perform a pixel uid lookup
else:
# Filter list, looking for ScanCode uid
lookup = list(filter(lambda x: x.position.uid == uid, pixel_indices_filtered))
scancode_uid = uid
# TODO (HaaTa) Make sure this is a valid ScanCode
# Also add a scancode_position entry (if this is a scancode)
scancode_positions[scancode_uid] = copy.copy(entry)
# Then lookup the pixel uid
if len(lookup) > 0:
uid = lookup[0].pixel.uid.index
# Also add a PixelId if one is found to the scancode_position entry
scancode_positions[scancode_uid]['PixelId'] = uid
positions[uid] = copy.copy(entry)
# Only add ScanCode if one was found
if scancode_uid is not None:
positions[uid]['ScanCode'] = scancode_uid
# Having a full list of Physical Positions is useful during code generation
self.pixel_positions = positions
self.scancode_positions = scancode_positions
# Lookup Pixel Display Mapping parameters
variables = self.full_context.query('AssignmentExpression', 'Variable')
try:
unit_size = float(variables.data['Pixel_DisplayMapping_UnitSize'].value)
column_size = int(variables.data['Pixel_DisplayMapping_ColumnSize'].value)
row_size = int(variables.data['Pixel_DisplayMapping_RowSize'].value)
column_direction = int(variables.data['Pixel_DisplayMapping_ColumnDirection'].value)
row_direction = int(variables.data['Pixel_DisplayMapping_RowDirection'].value)
except KeyError:
unit_size = 1
column_size = 20
row_size = 20
column_direction = 1
row_direction = 1
# Determine the min/max x/y for defining the mapping bounds
minval = {'x': 0, 'y': 0}
maxval = {'x': 0, 'y': 0}
for key, item in positions.items():
for val in ['x', 'y']:
if item[val] > maxval[val]:
maxval[val] = item[val]
if item[val] < minval[val]:
minval[val] = item[val]
# Calculate grid parameters
height_val = maxval['y'] - minval['y']
width_val = maxval['x'] - minval['x']
height = int(round(height_val / unit_size * column_size)) + 1
width = int(round(width_val / unit_size * row_size)) + 1
height_offset = minval['y'] * -1
width_offset = minval['x'] * -1
# Set parameters
self.pixel_display_params['Columns'] = width
self.pixel_display_params['Rows'] = height
# Define grid
self.pixel_display_mapping = [[0 for x in range(width)] for y in range(height)]
# Place each of the pixels within the x,y mapping
# Display an error if any pixel is overwritten (i.e. replacing non-0 value)
for key, item in positions.items():
# Calculate the percentage position in the grid
x_percent = ((item['x'] + width_offset) / width_val)
y_percent = ((item['y'] + height_offset) / height_val)
# Direction
if column_direction == -1:
y_percent = 1 - y_percent
if row_direction == -1:
x_percent = 1 - x_percent
# Determine the exact position
x = x_percent * (width - 1)
y = y_percent * (height - 1)
# First check with rounding
x_round = int(round(x))
y_round = int(round(y))
# Make sure we don't overwrite another pixel
if self.pixel_display_mapping[y_round][x_round] == 0:
self.pixel_display_mapping[y_round][x_round] = key
# Error out
# XXX We should try additional fitting locations -HaaTa
else:
print("{0} Cannot fit:".format(WARNING),
key, item, x_round, y_round, self.pixel_display_mapping[y_round][x_round]
)
# Debug info
if self.data_analysis_debug:
for row in self.pixel_display_mapping:
print(row)
def find_animation_result(self, result_expr):
'''
Returns list of animation results
The string'ified version of the object is unique.
Only return a reduced list of objects.
'''
animation_dict = {}
#print( result_expr )
for seq in result_expr:
for combo in seq:
for elem in combo:
if isinstance(elem, id.AnimationId):
animation_dict["{0}".format(elem)] = elem
# Just return the values
return animation_dict.values()
def generate_animation_settings(self):
'''
Generate Animation Settings
This function reconciles default and used animation settings.
Default settings are used to simplify used animation results.
Meaning that you don't have to remember to define the correct interpolation algorithm every time.
Each permutation of animation settings is stored (along with the defaults even if not used directly).
A reduction is done such that only the minimum number of settings entries are created.
animation_settings stores the key => settings lookup
animation_settings_list stores the order of the settings (using the keys)
'''
# Setup defaults
animations = self.full_context.query('DataAssociationExpression', 'Animation')
default_animations = {}
for key, animation in sorted(animations.data.items()):
str_name = "{0}({1})".format(key, animation.value)
self.animation_settings[str_name] = animation.value
self.animation_settings_list.append(str_name)
default_animations[key] = animation.value
val_list = []
for layer in self.layer_contexts:
map_expressions = layer.query('MapExpression')
for expr_type in map_expressions:
for key, expr in map_expressions[expr_type].data.items():
if isinstance(expr, list):
for elem in expr:
val_list += self.find_animation_result(elem.results)
# Reduce settings using defaults and determine which are variants needing new settings entries
for val in sorted(frozenset(val_list), key=lambda x: x.__repr__()):
mod_default_list = []
# Lookup defaults
lookup_name = "A[{0}]".format(val.name)
str_name = "{0}".format(val)
found = False
for name in default_animations.keys():
if lookup_name == name:
mod_default_list = default_animations[name].modifiers
found = True
# Otherwise just add it, if there isn't a default
if not found:
self.animation_settings[str_name] = val
self.animation_settings_orig[str_name] = val
self.animation_settings_list.append(str_name)
continue
# Update settings with defaults
new_setting = copy.deepcopy(val)
for setting in mod_default_list:
found = False
for mod in new_setting.modifiers:
if mod.name == setting.name:
found = True
if not found:
new_setting.replace(setting)
# Add update setting
self.animation_settings[str_name] = new_setting
self.animation_settings_orig[str_name] = val
# Make sure we haven't added this setting to the list already
if str_name not in self.animation_settings_list:
self.animation_settings_list.append(str_name)
# Build uid lookup for each of the animations
count = 0
for key, animation in sorted(animations.data.items()):
self.animation_uid_lookup[animation.association.name] = count
count += 1
def analyze(self):
'''
Analyze the set of configured contexts
'''
# Reduce Contexts
# Convert all trigger USBCodes to ScanCodes
self.reduction()
# Show result of filling datastructure
if self.data_analysis_display:
for key, kll_context in enumerate(self.reduced_contexts):
# Uncolorize if requested
output = "*\033[1;33mLayer{0}\033[0m:\033[1m{1}\033[0m".format(
key,
kll_context.paths(),
)
print(self.color and output or ansi_escape.sub('', output))
# Display Table
for store in kll_context.organization.stores():
# Uncolorize if requested
output = "\t\033[1;4;32m{0}\033[0m".format(
store.__class__.__name__
)
print(self.color and output or ansi_escape.sub('', output))
print(self.color and store or ansi_escape.sub('', store), end="")
# Generate 2D Pixel Display Mapping
self.generate_pixel_display_mapping()
# Generate Animation Settings List
self.generate_animation_settings()
# Generate Offset Table
# This is needed for interconnect devices
self.generate_map_offset_table()
# Generate Indices
# Assigns a sequential index (starting from 0) for each map expression
self.generate_mapping_indices()
# Generate Trigger Lists
self.generate_trigger_lists()
# Generate Rotation Ranges
self.generate_rotation_ranges()
def process(self):
'''
Data Analysis Stage Processing
'''
self._status = 'Running'
self.max_scan_code = self.control.stage('PreprocessorStage').max_scan_code
self.min_scan_code = self.control.stage('PreprocessorStage').min_scan_code
self.interconnect_scancode_offsets = self.control.stage('PreprocessorStage').interconnect_scancode_offsets
# Determine colorization setting
self.color = self.control.stage('CompilerConfigurationStage').color
# Acquire list of contexts
self.layer_contexts = self.control.stage('DataFinalizationStage').layer_contexts
self.partial_contexts = self.control.stage('DataFinalizationStage').partial_contexts
self.full_context = self.control.stage('DataFinalizationStage').full_context
# Analyze set of contexts
self.analyze()
# Return if status has changed
if self._status != 'Running':
return
self._status = 'Completed'
class CodeGenerationStage(Stage):
'''
Code Generation Stage
* Generates code for the given firmware backend
* Backend is selected in the Compiler Configuration Stage
* Uses the specified emitter to generate the code
'''
def __init__(self, control):
'''
Initialize configuration variables
'''
super().__init__(control)
def command_line_args(self, args):
'''
Group parser for command line arguments
@param args: Name space of processed arguments
'''
self.control.stage('CompilerConfigurationStage').emitters.command_line_args(args)
def command_line_flags(self, parser):
'''
Group parser for command line options
@param parser: argparse setup object
'''
# Create new option group
group = parser.add_argument_group('\033[1mCode Generation Configuration\033[0m')
# Create options groups for each of the Emitters
self.control.stage('CompilerConfigurationStage').emitters.command_line_flags(parser)
def process(self):
'''
Code Generation Stage Processing
'''
self._status = 'Running'
# Determine colorization setting
self.color = self.control.stage('CompilerConfigurationStage').color
# Get Emitter object
self.emitter = self.control.stage('CompilerConfigurationStage').emitters.emitter(
self.control.stage('CompilerConfigurationStage').emitter
)
# Call Emitter
self.emitter.process()
# Generate Outputs using Emitter
self.emitter.output()
# Check Emitter status
if self.emitter.check():
self._status = 'Completed'
else:
self._status = 'Incomplete'
class ReportGenerationStage(Stage):
'''
Report Generation Stage
* Using the datastructures and analyzed data, generate a compiler report
* TODO
'''
PK RKMO kll/emitters/README.md# KLL Compiler - emitters
The KLL compiler uses an emitter to handle the output format of the parsed input KLL file data.
The output can be in whatever format(s) may be required.
## Emitters
Description of each of the KLL emitters.
### [kiibohd](kiibohd)
The main emitter for the KLL compiler, it is used for the [Kiibohd Controller firmware](https://github.com/kiibohd/controller).
It currently generates 3-4 different files.
* `generatedKeymap.h` - Handles layers, scancode to USB code mapping and mapping to capabilities.
* `generatedPixelmap.h` - (PixelMap only) Handles LED channel mapping to pixels and animations.
* `kll_defs.h` - Various defines exported by the KLL compiler and used by the firmware.
* `kll.json` - JSON expansion of datastructures generated by the KLL compiler. Used by testing and other infrastructure around the firmware.
### [kll](kll)
This emitter generates a set of kll files using the input data.
Used by the [regen](../tests/regen.bash) unit test to determine whether the KLL compiler properly tokenized and parsed a given kll file.
Can also be used to manually combine a selection of kll files into one per layer.
### [none](none)
This emitter...generates nothing!
Generally used for syntax checking and debugging.
## Files
Brief description of each of the files.
* [emitters.py](emitters.py) - Handles each of the emitter modules. Must be updated to include new emitters.
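## Adding an Emitter
A new emitter is a class exposing the hooks the compiler stages call (`command_line_args`, `command_line_flags`, `process`, `output`, `check`).
Below is a minimal sketch, assuming the `Emitter` base class in `kll/common/emitter.py` accepts the `control` object at construction; the `MyEmitter` name and module layout are placeholders, not part of the compiler.

```python
#!/usr/bin/env python3
'''
Minimal KLL emitter sketch (illustrative only)
'''
from kll.common.emitter import Emitter


class MyEmitter(Emitter):
    '''
    Emits nothing; see emitters/none for the real minimal emitter.
    '''
    def __init__(self, control):
        super().__init__(control)  # assumption: the base class stores the control object

    def command_line_args(self, args):
        pass  # read emitter-specific arguments here

    def command_line_flags(self, parser):
        pass  # register emitter-specific argparse flags here

    def process(self):
        pass  # walk the datastructures exposed through the control stages

    def output(self):
        pass  # write the generated files

    def check(self):
        return True  # True -> CodeGenerationStage reports 'Completed'
```

To register it, import the module in [emitters.py](emitters.py) and add an entry to the `self.emitters` dictionary in the `Emitters` container class.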
PK RKM kll/emitters/__init__.pyPK RKM& kll/emitters/emitters.py#!/usr/bin/env python3
'''
KLL Emitters Container Classes
'''
# Copyright (C) 2016-2018 by Jacob Alexander
#
# This file is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This file is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this file. If not, see <http://www.gnu.org/licenses/>.
### Imports ###
import kll.emitters.kiibohd.kiibohd as kiibohd
import kll.emitters.kll.kll as kll
import kll.emitters.none.none as none
### Decorators ###
# Print Decorator Variables
ERROR = '\033[5;1;31mERROR\033[0m:'
WARNING = '\033[5;1;33mWARNING\033[0m:'
### Classes ###
class Emitters:
'''
Container class for KLL emitters
NOTES: To add a new emitter
- Add a new directory for your emitter (e.g. kiibohd)
- Add at least two files in this directory (<emitter name>.py and __init__.py)
- In <emitter name>.py define one class that inherits the Emitter class from common.emitter
- Add the new emitter class to the dictionary of emitters below
- Add an import statement to the top of this file
- The control object allows access to the entire set of KLL datastructures
(An illustrative skeleton of such a module is sketched at the end of this file.)
def __init__(self, control):
'''
Emitter initialization
@param control: ControlStage object, used to access data from other stages
'''
# Default emitter
self.default = "kiibohd"
# Dictionary of Emitters
self.emitters = {
'kiibohd': kiibohd.Kiibohd(control),
'kll': kll.KLL(control),
'none': none.Drop(control)
}
def emitter_default(self):
'''
Returns string name of default emitter
'''
return self.default
def emitter_list(self):
'''
List of emitters available
'''
return list(self.emitters.keys())
def emitter(self, emitter):
'''
Returns an emitter object
'''
return self.emitters[emitter]
def command_line_args(self, args):
'''
Group parser fan-out for emitter command line arguments
@param args: Name space of processed arguments
'''
# Always process command line args in the same order
for key, emitter in sorted(self.emitters.items(), key=lambda x: x[0]):
emitter.command_line_args(args)
def command_line_flags(self, parser):
'''
Group parser fan-out for emitter command line options
@param parser: argparse setup object
'''
# Always process command line flags in the same order
for key, emitter in sorted(self.emitters.items(), key=lambda x: x[0]):
emitter.command_line_flags(parser)
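# --- Illustrative example (not part of the compiler) ---
# A minimal sketch of a new emitter module, following the NOTES in the Emitters
# docstring above. The class and module names are placeholders; the Emitter base
# class is assumed to provide __init__(control) and a default check(), as used by
# the existing emitters.
from kll.common.emitter import Emitter

class ExampleEmitter(Emitter):
    '''
    Example emitter that emits nothing (modeled on emitters/none)
    '''
    def __init__(self, control):
        '''
        @param control: ControlStage object, used to access data from other stages
        '''
        Emitter.__init__(self, control)

    def command_line_args(self, args):
        '''No emitter-specific arguments'''
        pass

    def command_line_flags(self, parser):
        '''No emitter-specific flags'''
        pass

    def process(self):
        '''Nothing to generate'''
        pass

    def output(self):
        '''Nothing to write'''
        pass
# To register it, add an import at the top of this file and an entry such as
# 'example': example.ExampleEmitter(control) to self.emitters in Emitters.__init__.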
PK RKM kll/emitters/kiibohd/__init__.pyPK SM+ + kll/emitters/kiibohd/kiibohd.py#!/usr/bin/env python3
'''
KLL Kiibohd .h/.c File Emitter
'''
# Copyright (C) 2016-2018 by Jacob Alexander
#
# This file is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This file is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this file. If not, see <http://www.gnu.org/licenses/>.
### Imports ###
import os
import sys
from datetime import date
from kll.common.emitter import JsonEmitter, Emitter, TextEmitter
from kll.common.id import (
AnimationId,
CapId,
HIDId,
LayerId,
NoneId,
ScanCodeId,
TriggerId
)
from layouts.emitter import basic_c_defines
### Decorators ###
# Print Decorator Variables
ERROR = '\033[5;1;31mERROR\033[0m:'
WARNING = '\033[5;1;33mWARNING\033[0m:'
### Classes ###
class Kiibohd(Emitter, TextEmitter, JsonEmitter):
'''
Kiibohd .h file emitter for KLL
'''
# List of required capabilities
required_capabilities = {
'A': 'animationIndex',
'CONS': 'consCtrlOut',
'NONE': 'noneOut',
'SYS': 'sysCtrlOut',
'USB': 'usbKeyOut',
'Layer': 'layerShift',
'LayerShift': 'layerShift',
'LayerLatch': 'layerLatch',
'LayerLock': 'layerLock',
}
# Code to capability mapping
code_to_capability = {
'Animation': 'animationIndex',
'Capability': None,
'ConsCode': 'consCtrlOut',
'Layer': 'layerShift',
'LayerShift': 'layerShift',
'LayerLatch': 'layerLatch',
'LayerLock': 'layerLock',
'None': 'none',
'ScanCode': None,
'SysCode': 'sysCtrlOut',
'USBCode': 'usbKeyOut',
}
# Required capabilities that are allowed to be missing without raising an error
# Used mostly for animationIndex
optional_required_capabilities = [
'A',
]
def __init__(self, control):
'''
Emitter initialization
@param control: ControlStage object, used to access data from other stages
'''
Emitter.__init__(self, control)
TextEmitter.__init__(self)
JsonEmitter.__init__(self)
# Script directory (relative location to default templates)
kll_module_dir = os.path.abspath(
os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..')
)
template_dir = os.path.join(kll_module_dir, 'templates')
# Defaults
self.map_template = os.path.join(template_dir, "kiibohdKeymap.h")
self.hid_template = os.path.join(template_dir, "kiibohd_usb_hid.h")
self.pixel_template = os.path.join(template_dir, "kiibohdPixelmap.c")
self.def_template = os.path.join(template_dir, "kiibohdDefs.h")
self.map_output = "generatedKeymap.h"
self.hid_output = "usb_hid.h"
self.pixel_output = "generatedPixelmap.c"
self.def_output = "kll_defs.h"
self.json_output = "kll.json"
self.kiibohd_debug = False
# Convenience
self.capabilities = None
self.capabilities_index = dict()
self.use_pixel_map = False # Default to disabling PixelMap (auto-enables if needed)
# Flag that an error occurred during emission
# We may not want to exit immediately as we could find other potential issues that need fixing
self.error_exit = False
# Fill dictionary
self.fill_dict = {}
# USB Code Lookup for header emitter
self.usb_code_lookup = None
self.usb_c_defines = None
def command_line_args(self, args):
'''
Group parser for command line arguments
@param args: Name space of processed arguments
'''
self.def_template = args.def_template
self.map_template = args.map_template
self.hid_template = args.hid_template
self.pixel_template = args.pixel_template
self.def_output = args.def_output
self.map_output = args.map_output
self.hid_output = args.hid_output
self.pixel_output = args.pixel_output
self.json_output = args.json_output
self.kiibohd_debug = args.kiibohd_debug
def command_line_flags(self, parser):
'''
Group parser for command line options
@param parser: argparse setup object
'''
# Create new option group
group = parser.add_argument_group('\033[1mKiibohd Emitter Configuration\033[0m')
group.add_argument('--def-template', type=str, default=self.def_template,
help="Specify KLL define .h file template.\n"
"\033[1mDefault\033[0m: {0}\n".format(self.def_template)
)
group.add_argument('--map-template', type=str, default=self.map_template,
help="Specify KLL map .h file template.\n"
"\033[1mDefault\033[0m: {0}\n".format(self.map_template)
)
group.add_argument('--hid-template', type=str, default=self.hid_template,
help="Specify USB HID Lookup .h file template.\n"
"\033[1mDefault\033[0m: {0}\n".format(self.hid_template)
)
group.add_argument('--pixel-template', type=str, default=self.pixel_template,
help="Specify KLL pixel map .c file template.\n"
"\033[1mDefault\033[0m: {0}\n".format(self.pixel_template)
)
group.add_argument('--def-output', type=str, default=self.def_output,
help="Specify KLL define .h file output.\n"
"\033[1mDefault\033[0m: {0}\n".format(self.def_output)
)
group.add_argument('--map-output', type=str, default=self.map_output,
help="Specify KLL map .h file output.\n"
"\033[1mDefault\033[0m: {0}\n".format(self.map_output)
)
group.add_argument('--hid-output', type=str, default=self.hid_output,
help="Specify USB HID Lookup .h file output.\n"
"\033[1mDefault\033[0m: {0}\n".format(self.hid_output)
)
group.add_argument('--pixel-output', type=str, default=self.pixel_output,
help="Specify KLL map .h file output.\n"
"\033[1mDefault\033[0m: {0}\n".format(self.pixel_output)
)
group.add_argument('--json-output', type=str, default=self.json_output,
help="Specify json output file for settings dictionary.\n"
"\033[1mDefault\033[0m: {0}\n".format(self.json_output)
)
group.add_argument(
'--kiibohd-debug',
action='store_true',
default=self.kiibohd_debug,
help="Show debug info from kiibohd emitter.",
)
def check_file(self, filepath):
'''
Check file, make sure it exists
@param filepath: File path
Prints an error message if the file does not exist
'''
if not os.path.isfile(filepath):
print("{} Did not generate: {}".format(
ERROR,
os.path.abspath(filepath),
))
def output(self):
'''
Final Stage of Emitter
Generate desired outputs from templates
'''
if self.kiibohd_debug:
print("-- Generating --")
print(os.path.abspath(self.def_output))
print(os.path.abspath(self.map_output))
print(os.path.abspath(self.hid_output))
if self.use_pixel_map:
print(os.path.abspath(self.pixel_output))
print(os.path.abspath(self.json_output))
# Load define template and generate
self.load_template(self.def_template)
self.generate(self.def_output)
# Load keymap template and generate
self.load_template(self.map_template)
self.generate(self.map_output)
# Load hid lookup template and generate
self.load_template(self.hid_template)
self.generate(self.hid_output)
# Load pixelmap template and generate
if self.use_pixel_map:
self.load_template(self.pixel_template)
self.generate(self.pixel_output)
# Remove file if it exists, and create an empty file
else:
open(self.pixel_output, 'w').close()
# Generate Json Output
self.generate_json(self.json_output)
# Make sure files were generated
self.check_file(self.def_output)
self.check_file(self.map_output)
if self.use_pixel_map:
self.check_file(self.pixel_output)
self.check_file(self.json_output)
def byte_split(self, number, total_bytes):
'''
Converts an integer into a fixed-length list of byte-sized integers
Used to convert a large number into 8 bit chunks so it can fit inside a C byte array.
Little endian byte order is used.
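Example (illustrative): byte_split(0x1234, 2) returns ["52", "18"]
(0x34 then 0x12), and byte_split(-2, 2) returns ["254", "255"].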
'''
# If negative, use signed mode
# In general we output unsigned, but in some cases we need a negative value
# For these cases each context can handle the signed integers
negative = number < 0
# XXX Yes, little endian from how the uC structs work
byte_form = number.to_bytes(
total_bytes,
byteorder='little',
signed=negative,
)
# Convert into a list of strings
return ["{0}".format(int(byte)) for byte in byte_form]
def result_combo_conversion(self, combo=None):
'''
Converts a result combo (list of Ids) to the C array string format
@param combo: List of Ids/capabilities
@return: C array string format
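Example (illustrative): combo=None (a sequence spacer) yields
"1, <usbKeyOut capability index>, 0x00".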
'''
# If combo is None, assume a 0-length USB code (used as a sequence spacer)
if combo is None:
# <combo length>, <usbKeyOut capability index>, <USB code 0x00>
return "1, {0}, 0x00".format(self.capabilities_index[self.required_capabilities['USB']])
# Determine length of combo
output = "{0}".format(len(combo))
# Iterate over each trigger identifier
for index, identifier in enumerate(combo):
# Lookup capability index
cap = "/* XXX INVALID XXX */"
# HIDId
if isinstance(identifier, HIDId):
# Lookup capability index
cap_index = self.capabilities_index[self.required_capabilities[identifier.second_type]]
cap_arg = ""
# Check for a split argument (e.g. Consumer codes)
if identifier.width() > 1:
cap_arg = ", ".join(
self.byte_split(identifier.get_uid(), identifier.width())
)
# Do not lookup hid define if USB Keycode and >= 0xF0
# These are unofficial HID codes, report error
elif identifier.second_type == 'USB' and identifier.get_uid() >= 0xF0:
print("{0} '{1}' Invalid USB HID code, missing FuncMap layout (e.g. stdFuncMap, lcdFuncMap)".format(
ERROR,
identifier
))
cap_arg = "/* XXX INVALID {0} */".format(identifier)
self.error_exit = True
# Otherwise use the C define instead of the number (increases readability)
else:
try:
cap_arg = self.usb_code_lookup[identifier.type][identifier.get_hex_str()]
except KeyError as err:
print("{0} {1} HID lookup kll bug...please report.".format(ERROR, err))
self.error_exit = True
cap = "{0}, {1}".format(cap_index, cap_arg)
# None - NoneId is an instance of CapId, so it has to be checked before the CapId case
elif isinstance(identifier, NoneId):
# <combo length>, <usbKeyOut capability index>, <USB code 0x00>
return "1, {0}, 0x00".format(self.capabilities_index[self.required_capabilities['USB']])
# Capability
elif isinstance(identifier, CapId):
# Lookup capability index
cap_index = self.capabilities_index[identifier.name]
# Check if we need to add arguments to capability
if identifier.total_arg_bytes(self.capabilities.data) > 0:
cap_lookup = self.capabilities.data[identifier.name].association
cap = "{0}".format(cap_index)
for arg, lookup in zip(identifier.arg_list, cap_lookup.arg_list):
cap += ", "
cap += ", ".join(self.byte_split(arg.value, lookup.width))
# Otherwise, no arguments necessary
else:
cap = "{0}".format(cap_index)
# AnimationId
elif isinstance(identifier, AnimationId):
# Lookup capability index
cap_index = self.capabilities_index[self.required_capabilities[identifier.second_type]]
cap_arg = ""
# Lookup animation setting index
settings_index = 0
lookup_id = "{0}".format(identifier)
animation_settings_list = self.control.stage('DataAnalysisStage').animation_settings_list
if lookup_id not in animation_settings_list:
print("{0} Unknown animation '{1}'".format(ERROR, lookup_id))
self.error_exit = True
else:
settings_index = animation_settings_list.index(lookup_id)
# Check for a split argument (e.g. Consumer codes)
if identifier.width() > 1:
cap_arg = ", ".join(
self.byte_split(settings_index, identifier.width())
)
cap = "{0}, {1}".format(cap_index, cap_arg)
# LayerId
elif isinstance(identifier, LayerId):
# Lookup capability index
cap_index = self.capabilities_index[self.required_capabilities[identifier.type]]
cap_arg = ""
layer_number = identifier.uid
# Check for a split argument (e.g. Consumer codes)
if identifier.width() > 1:
cap_arg = ", ".join(
self.byte_split(layer_number, identifier.width())
)
cap = "{0}, {1}".format(cap_index, cap_arg)
# Unknown/Invalid Id
else:
print("{0} Unknown identifier({1}) -> {2}".format(
ERROR,
identifier.__class__.__name__,
identifier,
))
self.error_exit = True
# Generate identifier string for element of the combo
output += ", {0}".format(
cap,
)
return output
def trigger_combo_conversion(self, combo):
'''
Converts a trigger combo (list of Ids) to the C array string format
@param combo: List of Ids/capabilities
@return: C array string format
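Example (illustrative): a single pressed ScanCodeId with uid 0x05 yields
"1, TriggerType_Switch1, ScheduleType_P, 0x05".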
'''
# Determine length of combo
output = "{0}".format(len(combo))
# Iterate over each trigger identifier
for index, identifier in enumerate(combo):
# Construct trigger combo
trigger = "/* XXX INVALID XXX */"
# TODO Add support for Analog keys
# TODO Add support for non-press states
uid = identifier.get_uid()
trigger_type = "/* XXX INVALID TYPE XXX */"
state = "ScheduleType_P"
no_error = False
# ScanCodeId
if isinstance(identifier, ScanCodeId):
no_error = True
# Determine the type and adjust uid
if uid < 256:
trigger_type = "TriggerType_Switch1"
elif uid < 512:
trigger_type = "TriggerType_Switch2"
uid -= 256
elif uid < 768:
trigger_type = "TriggerType_Switch3"
uid -= 512
elif uid < 1024:
trigger_type = "TriggerType_Switch4"
uid -= 768
else:
no_error = False
# LayerId
elif isinstance(identifier, LayerId):
no_error = True
# Determine the type and adjust uid
if uid < 256:
trigger_type = "TriggerType_Layer1"
elif uid < 512:
trigger_type = "TriggerType_Layer2"
uid -= 256
elif uid < 768:
trigger_type = "TriggerType_Layer3"
uid -= 512
elif uid < 1024:
trigger_type = "TriggerType_Layer4"
uid -= 768
else:
no_error = False
# Determine additional state information to encode
# OR additional information onto trigger state
if identifier.type == 'LayerShift':
state += " | ScheduleType_Shift"
elif identifier.type == 'Layer':
pass
elif identifier.type == 'LayerLatch':
state += " | ScheduleType_Latch"
elif identifier.type == 'LayerLock':
state += " | ScheduleType_Lock"
else:
no_error = False
# AnimationId
elif isinstance(identifier, AnimationId):
no_error = True
# Retrieve uid of animation
animation_uid_lookup = self.control.stage('DataAnalysisStage').animation_uid_lookup
uid = animation_uid_lookup[identifier.name]
# Retrieve state
# TODO (HaaTa) Cannot use set directly here if using Off state...
states = set(identifier.strSchedule())
# Default to either Repeat or Done
state = ""
if states == set(['R', 'D']) or len(states) == 0:
state = "ScheduleType_Repeat | ScheduleType_Done"
# Repeat
elif 'R' in states:
state = "ScheduleType_Repeat"
# Done
elif 'D' in states:
state = "ScheduleType_Done"
# Invalid
else:
no_error = False
# Determine the type and adjust uid
if uid < 256:
trigger_type = "TriggerType_Animation1"
elif uid < 512:
trigger_type = "TriggerType_Animation2"
uid -= 256
elif uid < 768:
trigger_type = "TriggerType_Animation3"
uid -= 512
elif uid < 1024:
trigger_type = "TriggerType_Animation4"
uid -= 768
else:
no_error = False
# IndCode HIDId
elif isinstance(identifier, HIDId) and identifier.type == 'IndCode':
no_error = True
# Determine the type and adjust uid
if uid < 256:
trigger_type = "TriggerType_LED1"
# Check if states are given
states = identifier.strSchedule()
if len(states) > 0:
state_list = []
if 'A' in states:
state_list.append("ScheduleType_A")
if 'On' in states:
state_list.append("ScheduleType_On")
if 'D' in states:
state_list.append("ScheduleType_D")
if 'Off' in states:
state_list.append("ScheduleType_Off")
state = " | ".join(state_list)
else:
no_error = False
# TriggerId
elif isinstance(identifier, TriggerId):
no_error = True
# No need to decode as a TriggerId has all the necessary information ready
trigger_type = identifier.idcode
# However, for the types that are known, use the full name
lookup = {
0x00: 'TriggerType_Switch1',
0x01: 'TriggerType_Switch2',
0x02: 'TriggerType_Switch3',
0x03: 'TriggerType_Switch4',
0x04: 'TriggerType_LED1',
0x05: 'TriggerType_Analog1',
0x06: 'TriggerType_Analog2',
0x07: 'TriggerType_Analog3',
0x08: 'TriggerType_Analog4',
0x09: 'TriggerType_Layer1',
0x0A: 'TriggerType_Layer2',
0x0B: 'TriggerType_Layer3',
0x0C: 'TriggerType_Layer4',
0x0D: 'TriggerType_Animation1',
0x0E: 'TriggerType_Animation2',
0x0F: 'TriggerType_Animation3',
0x10: 'TriggerType_Animation4',
0x11: 'TriggerType_Sleep1',
0x12: 'TriggerType_Resume1',
0x13: 'TriggerType_Inactive1',
0x14: 'TriggerType_Active1',
0x15: 'TriggerType_Rotation1',
0xFF: 'TriggerType_Debug',
}
if trigger_type in lookup.keys():
trigger_type = lookup[trigger_type]
uid = identifier.uid
# Rotations use state differently
if trigger_type == 'TriggerType_Rotation1':
state = identifier.parameters[0]
# Unknown/Invalid Id
else:
print("{0} Unknown identifier -> {1}".format(ERROR, identifier))
self.error_exit = True
# Set trigger if there wasn't an error
if no_error:
# <trigger type>, <state>, <uid>
trigger = "{0}, {1}, 0x{2:02X}".format(trigger_type, state, uid)
# Generate identifier string for element of the combo
output += ", {0}".format(
trigger,
)
return output
def animation_frameset(self, name, aniframe):
'''
Generates an animation frame set, and may also generate filler frames if necessary
@param name: Name for animation
@param aniframe: Animation frame data
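Example (illustrative): name='fade', aniframe=2 appends roughly:
//// fade Animation Frame Set ////
const uint8_t *fade_frames[] = {
	fade_frame1,
	fade_frame2,
	0
};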
'''
# Frame set header
self.fill_dict['AnimationFrames'] += "//// {0} Animation Frame Set ////\n".format(
name
)
self.fill_dict['AnimationFrames'] += "const uint8_t *{0}_frames[] = {{".format(
name
)
# Generate entry for each of the frames (even blank inbetweens)
for index in range(1, aniframe + 1):
self.fill_dict['AnimationFrames'] += "\n\t{0}_frame{1},".format(
name,
index
)
self.fill_dict['AnimationFrames'] += "\n\t0\n};\n\n\n"
def animation_modifier_set(self, animation, name):
'''
Look up the given animation modifier and convert it to its numeric setting (0 if unset or unsupported)
name is the name of the animation modifier
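Example (illustrative): 'replace' maps stack->0, basic->1, all->2, state->3,
clear->4; unset modifiers return 0, and unsupported values warn and return 0.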
'''
modifier = animation.getModifier(name)
# Simple modifier
simple_mods = ['pos', 'loops', 'framedelay', 'divmask', 'frame']
if name in simple_mods:
if not modifier or modifier is None:
return 0
return modifier
if name == 'pfunc':
if not modifier or modifier is None or modifier == 'off':
return 0
if modifier.arg == 'interp':
return 1
print("{0} '{1}:{2}' is unsupported".format(WARNING, name, modifier))
return 0
if name == 'ffunc':
if not modifier or modifier is None or modifier == 'off':
return 0
print("{0} '{1}:{2}' is unsupported".format(WARNING, name, modifier))
return 0
if name == 'replace':
if not modifier or modifier is None or modifier.arg == 'stack':
return 0
if modifier.arg == 'basic':
return 1
if modifier.arg == 'all':
return 2
if modifier.arg == 'state':
return 3
if modifier.arg == 'clear':
return 4
print("{0} '{1}:{2}' is unsupported".format(WARNING, name, modifier))
return 0
print("{0} '{1}:{2}' is unsupported".format(WARNING, name, modifier))
return 0
def animation_settings_entry(self, animation, animation_name, count, additional=False):
'''
Build an animation settings string entry
'''
# For each animation index store the default settings
# - Each flag defaults to 1 and is cleared below if its modifier is not set
a_start = 1
a_pause = 1
a_stop = 1
a_single = 1
if not animation.getModifier('start'):
a_start = 0
if not animation.getModifier('pause'):
a_pause = 0
if not animation.getModifier('stop'):
a_stop = 0
if not animation.getModifier('single'):
a_single = 0
# - Animation id (Animation__<name>)
a_name = animation_name
# - Frame position
a_pos = self.animation_modifier_set(animation, 'pos')
# - Sub frame position
a_subpos = 0
# - Number of loops, set to 0 for infinite
a_loops = self.animation_modifier_set(animation, 'loops')
if animation.getModifier('loop'):
a_loops = 0
# - Frame delay
a_framedelay = self.animation_modifier_set(animation, 'framedelay')
# - Frame option
a_frameoption = []
# - frameoption Frame stretch
if animation.getModifier('framestretch'):
a_frameoption.append("PixelFrameOption_FrameStretch")
# - Frame function index
a_ffunc = self.animation_modifier_set(animation, 'ffunc')
# - Pixel function index
a_pfunc = self.animation_modifier_set(animation, 'pfunc')
# - Replacement mode
a_replace = self.animation_modifier_set(animation, 'replace')
# - Animation play state (defaults to Paused if not set)
if a_pause == 1:
a_state = "AnimationPlayState_Pause"
elif a_stop == 1:
a_state = "AnimationPlayState_Stop"
elif a_start == 1:
a_state = "AnimationPlayState_Start"
elif a_single == 1:
a_state = "AnimationPlayState_Single"
else:
a_state = "AnimationPlayState_Pause"
# Determine the a_frameoption string to emit
a_frameoption_str = "PixelFrameOption_None"
for option in a_frameoption:
if a_frameoption_str == "PixelFrameOption_None":
a_frameoption_str = option
else:
a_frameoption_str += " | {}".format(option)
# Do not set a_initial if this is an additional (non-default) animation settings entry
a_initial = 1
if additional:
a_initial = 0
return "\n\t{{ (TriggerMacro*){2}, {3}, /*{0} {1}*/\n\t\t{4}, {5}, {6}, {7}, {8}, {9}, {10}, {11}, {12}}},".format(
count,
animation,
# AnimationStackElement
a_initial,
a_name,
a_pos,
a_subpos,
a_loops,
a_framedelay,
a_frameoption_str,
a_ffunc,
a_pfunc,
a_replace,
a_state,
)
def process(self):
'''
Emitter Processing
Takes KLL datastructures and analysis results, then populates the fill_dict.
The fill_dict is used to populate the template files.
'''
# Acquire Datastructures
early_contexts = self.control.stage('DataOrganizationStage').contexts
full_context = self.control.stage('DataFinalizationStage').full_context
reduced_contexts = self.control.stage('DataAnalysisStage').reduced_contexts # Default + Partial
trigger_index = self.control.stage('DataAnalysisStage').trigger_index
trigger_index_reduced = self.control.stage('DataAnalysisStage').trigger_index_reduced
result_index = self.control.stage('DataAnalysisStage').result_index
trigger_index_reduced_lookup = self.control.stage('DataAnalysisStage').trigger_index_reduced_lookup
trigger_index_lookup = self.control.stage('DataAnalysisStage').trigger_index_lookup
result_index_lookup = self.control.stage('DataAnalysisStage').result_index_lookup
min_scan_code = self.control.stage('DataAnalysisStage').min_scan_code
max_scan_code = self.control.stage('DataAnalysisStage').max_scan_code
trigger_lists = self.control.stage('DataAnalysisStage').trigger_lists
interconnect_scancode_offsets = self.control.stage('DataAnalysisStage').interconnect_scancode_offsets
interconnect_pixel_offsets = self.control.stage('DataAnalysisStage').interconnect_pixel_offsets
rotation_map = self.control.stage('DataAnalysisStage').rotation_map
scancode_positions = self.control.stage('DataAnalysisStage').scancode_positions
pixel_positions = self.control.stage('DataAnalysisStage').pixel_positions
pixel_display_mapping = self.control.stage('DataAnalysisStage').pixel_display_mapping
pixel_display_params = self.control.stage('DataAnalysisStage').pixel_display_params
animation_settings = self.control.stage('DataAnalysisStage').animation_settings
animation_settings_orig = self.control.stage('DataAnalysisStage').animation_settings_orig
animation_settings_list = self.control.stage('DataAnalysisStage').animation_settings_list
animation_uid_lookup = self.control.stage('DataAnalysisStage').animation_uid_lookup
# Build full list of C-Defines
layout_mgr = self.control.stage('PreprocessorStage').layout_mgr
self.usb_c_defines = basic_c_defines(layout_mgr.get_layout('default'))
self.usb_code_lookup = {
'USBCode': dict([(t[1], t[0]) for t in self.usb_c_defines[0]]),
'IndCode': dict([(t[1], t[0]) for t in self.usb_c_defines[1]]),
'SysCode': dict([(t[1], t[0]) for t in self.usb_c_defines[2]]),
'ConsCode': dict([(t[1], t[0]) for t in self.usb_c_defines[3]]),
}
# Setup json datastructures
animation_id_json = dict()
animation_settings_json = dict()
animation_settings_index_json = []
pixel_id_json = dict()
scancode_json = dict()
capabilities_json = dict()
defines_json = dict()
layers_json = dict()
# Build string list of compiler arguments
compilerArgs = ""
for arg in sys.argv:
if "--" in arg or ".py" in arg:
compilerArgs += "// {0}\n".format(arg)
else:
compilerArgs += "// {0}\n".format(arg)
# Build a string of modified files, if any
gitChangesStr = "\n"
if len(self.control.git_changes) > 0:
for gitFile in self.control.git_changes:
gitChangesStr += "// {0}\n".format(gitFile)
else:
gitChangesStr = " None\n"
def get_context_name(context_type, contexts=None):
'''
Retrieve context names and paths
@param context_type: Name of context type
@param contexts: Optional list of contexts; when None, queried using context_type
@return: List of (name, context path) pairs
'''
if contexts is None:
contexts = early_contexts[context_type].query_contexts('AssignmentExpression', 'Array')
output = []
for sub in contexts:
name = "None"
if 'Name' in sub[0].data.keys():
name = sub[0].data['Name'].value
else:
print("{0} 'Name' field missing from '{1}' context".format(WARNING, context_type))
path = sub[1].parent.path
output.append((name, path))
return output
# Prepare BaseLayout and Layer Info
configLayoutInfo = ""
if 'ConfigurationContext' in early_contexts.keys():
for pair in get_context_name('ConfigurationContext'):
configLayoutInfo += "// {0}\n// {1}\n".format(pair[0], pair[1])
genericLayoutInfo = ""
if 'GenericContext' in early_contexts.keys():
for pair in get_context_name('GenericContext'):
genericLayoutInfo += "// {0}\n// {1}\n".format(pair[0], pair[1])
baseLayoutInfo = ""
if 'BaseMapContext' in early_contexts.keys():
for pair in get_context_name('BaseMapContext'):
baseLayoutInfo += "// {0}\n// {1}\n".format(pair[0], pair[1])
defaultLayerInfo = ""
if 'DefaultMapContext' in early_contexts.keys():
for pair in get_context_name('DefaultMapContext'):
defaultLayerInfo += "// {0}\n// {1}\n".format(pair[0], pair[1])
partialLayersInfo = ""
partial_context_list = [
(item[1].layer, item[0])
for item in early_contexts.items()
if 'PartialMapContext' in item[0]
]
for layer, tag in sorted(partial_context_list, key=lambda x: x[0]):
partialLayersInfo += "// Layer {0}\n".format(layer + 1)
contexts = early_contexts[tag].query_contexts('AssignmentExpression', 'Array')
for pair in get_context_name('PartialMapContext', contexts):
partialLayersInfo += "// {0}\n// {1}\n".format(pair[0], pair[1])
## Information ##
self.fill_dict['Information'] = "// This file was generated by the kll compiler, DO NOT EDIT.\n"
self.fill_dict['Information'] += "// Generation Date: {0}\n".format(date.today())
self.fill_dict['Information'] += "// KLL Emitter: {0}\n".format(
self.control.stage('CompilerConfigurationStage').emitter
)
self.fill_dict['Information'] += "// KLL Version: {0}\n".format(self.control.version)
self.fill_dict['Information'] += "// KLL Git Changes:{0}".format(gitChangesStr)
self.fill_dict['Information'] += "// Compiler arguments:\n{0}".format(compilerArgs)
self.fill_dict['Information'] += "//\n"
self.fill_dict['Information'] += "// - Configuration File -\n{0}".format(configLayoutInfo)
self.fill_dict['Information'] += "// - Generic Files -\n{0}".format(genericLayoutInfo)
self.fill_dict['Information'] += "// - Base Layer -\n{0}".format(baseLayoutInfo)
self.fill_dict['Information'] += "// - Default Layer -\n{0}".format(defaultLayerInfo)
self.fill_dict['Information'] += "// - Partial Layers -\n{0}".format(partialLayersInfo)
## Defines ##
self.fill_dict['Defines'] = ""
# Iterate through defines and lookup the variables
defines = full_context.query('NameAssociationExpression', 'Define')
variables = full_context.query('AssignmentExpression', 'Variable')
for dkey, dvalue in sorted(defines.data.items()):
if dvalue.name in variables.data.keys():
# TODO Handle arrays
if not isinstance(variables.data[dvalue.name].value, list):
value = variables.data[dvalue.name].value.replace('\n', ' \\\n')
self.fill_dict['Defines'] += "\n#define {0} {1}".format(
dvalue.association,
value,
)
defines_json[dvalue.name] = {
'name' : dvalue.association,
'value' : value,
}
else:
print("{0} '{1}' not defined...".format(WARNING, dvalue.name))
## Capabilities ##
self.fill_dict['CapabilitiesFuncDecl'] = ""
self.fill_dict['CapabilitiesList'] = "const Capability CapabilitiesList[] = {\n"
self.fill_dict['CapabilitiesIndices'] = "typedef enum CapabilityIndex {\n"
# Sorted by C Function name
self.capabilities = full_context.query('NameAssociationExpression', 'Capability')
self.capabilities_index = dict()
count = 0
safe_capabilities = [
# PartialMap
"layerState",
"layerLatch",
"layerLock",
"layerShift",
"layerRotate",
"testThreadSafe",
# USB
"consCtrlOut",
"noneOut",
"sysCtrlOut",
"usbKeyOut",
"mouseOut",
"mouseWheelOut",
"flashMode",
]
for dkey, dvalue in sorted(self.capabilities.data.items(), key=lambda x: x[1].association.name):
funcName = dvalue.association.name
argByteWidth = dvalue.association.total_arg_bytes()
features = "CapabilityFeature_Safe" if dkey in safe_capabilities else "CapabilityFeature_None"
self.fill_dict['CapabilitiesList'] += "\t/* {3} {4} */\n\t{{ {0}, {1}, {2} }},\n".format(
funcName,
argByteWidth,
features,
count,
dkey,
)
self.fill_dict['CapabilitiesFuncDecl'] += \
"void {0}( TriggerMacro *trigger, uint8_t state, uint8_t stateType, uint8_t *args );\n".format(funcName)
self.fill_dict['CapabilitiesIndices'] += "\t{0}_index,\n".format(funcName)
# Add to json
capabilities_json[dkey] = {
'args_count' : len(dvalue.association.arg_list),
'args' : [],
'name' : funcName,
'index' : count,
'features' : features,
}
for arg in dvalue.association.arg_list:
capabilities_json[dkey]['args'].append({
'name' : arg.name,
'width' : arg.width,
})
# Generate index for result lookup
self.capabilities_index[dkey] = count
count += 1
self.fill_dict['CapabilitiesList'] += "};"
self.fill_dict['CapabilitiesIndices'] += "} CapabilityIndex;"
# Validate that we have the required capabilities
for key, elem in self.required_capabilities.items():
if elem not in self.capabilities_index.keys():
if key not in self.optional_required_capabilities:
self.error_exit = True
print("{0} Required capability '{1}' for '{2}' is missing!".format(
ERROR,
elem,
key,
))
## Results Macros ##
self.fill_dict['ResultMacros'] = ""
# Iterate through each of the indexed result macros
# This is the full set of result macros, layers are handled separately
for index, result in enumerate(result_index):
self.fill_dict['ResultMacros'] += "Guide_RM( {0} ) = {{ ".format(index)
# Add the result macro capability index guide (including capability arguments)
# See kiibohd controller Macros/PartialMap/kll.h for exact formatting details
for seq_index, sequence in enumerate(result[0].results):
# If the sequence is longer than 1, prepend a sequence spacer
# Needed for USB behaviour; otherwise repeated keys will not work
if seq_index > 0:
# <combo length>, <usbKeyOut capability index>, <USB code 0x00>
self.fill_dict['ResultMacros'] += "{0}, ".format(self.result_combo_conversion())
# Iterate over each combo (element of the sequence)
for com_index, combo in enumerate(sequence):
# Convert capability and arguments to output string
self.fill_dict['ResultMacros'] += "{0}, ".format(self.result_combo_conversion(combo))
# If sequence is longer than 1, append a sequence spacer at the end of the sequence
# Required by USB to end the sequence without holding the key down
if len(result[0].results[0]) > 1:
# <combo length>, <usbKeyOut capability index>, <USB code 0x00>
self.fill_dict['ResultMacros'] += "{0}, ".format(self.result_combo_conversion())
# Add list ending 0 and end of list
self.fill_dict['ResultMacros'] += "0 }}; // {0}\n".format(
result[0].result_str()
)
self.fill_dict['ResultMacros'] = self.fill_dict['ResultMacros'][:-1] # Remove last newline
## Result Macro List ##
self.fill_dict['ResultMacroList'] = "const ResultMacro ResultMacroList[] = {\n"
# Iterate through each of the result macros
for index, result in enumerate(result_index):
# Include debug string for each result macro
self.fill_dict['ResultMacroList'] += "\tDefine_RM( {0} ), // {1}\n".format(
index,
result[0].result_str()
)
self.fill_dict['ResultMacroList'] += "};"
## Trigger Macros ##
self.fill_dict['TriggerMacros'] = ""
# Iterate through each of the trigger macros
for index, trigger in enumerate(trigger_index_reduced):
self.fill_dict['TriggerMacros'] += "Guide_TM( {0} ) = {{ ".format(index)
# Add the trigger macro scan code guide
# See kiibohd controller Macros/PartialMap/kll.h for exact formatting details
for seq_index, sequence in enumerate(trigger[0].triggers):
# Iterate over each combo (element of the sequence)
# For each combo, add the length, key type, key state and scan code
for com_index, combo in enumerate(sequence):
# Convert each combo into an array of bytes
self.fill_dict['TriggerMacros'] += "{0}, ".format(
self.trigger_combo_conversion(combo)
)
# Add list ending 0 and end of list
self.fill_dict['TriggerMacros'] += "0 }}; // {0}\n".format(
trigger[0].trigger_str()
)
self.fill_dict['TriggerMacros'] = self.fill_dict['TriggerMacros'][:-1] # Remove last newline
## Trigger Macro List ##
self.fill_dict['TriggerMacroList'] = "const TriggerMacro TriggerMacroList[] = {\n"
# Iterate through each of the trigger macros
for index, trigger in enumerate(trigger_index):
# Use TriggerMacro Index, and the corresponding ResultMacro Index, including debug string
self.fill_dict['TriggerMacroList'] += "\t/* {3} */ Define_TM( {0}, {1} ), // {2}\n".format(
trigger_index_reduced_lookup[trigger[0].sort_trigger()],
result_index_lookup[trigger[0].sort_result()],
trigger[0],
index
)
self.fill_dict['TriggerMacroList'] += "};"
## Trigger Macro Record ##
self.fill_dict['TriggerMacroRecord'] = "TriggerMacroRecord TriggerMacroRecordList[ TriggerMacroNum ];"
## Max Scan Code ##
self.fill_dict['MaxScanCode'] = "#define MaxScanCode 0x{0:X}".format(max(max_scan_code))
## Interconnect ScanCode Offset List ##
self.fill_dict['ScanCodeInterconnectOffsetList'] = "const uint8_t InterconnectOffsetList[] = {\n"
for index, offset in enumerate(interconnect_scancode_offsets):
self.fill_dict['ScanCodeInterconnectOffsetList'] += "\t0x{0:02X},\n".format(
offset
)
self.fill_dict['ScanCodeInterconnectOffsetList'] += "};"
## Max Interconnect Nodes ##
self.fill_dict['InterconnectNodeMax'] = "#define InterconnectNodeMax 0x{0:X}\n".format(
len(interconnect_scancode_offsets)
)
## Default Layer and Default Layer Scan Map ##
self.fill_dict['DefaultLayerTriggerList'] = ""
self.fill_dict['DefaultLayerScanMap'] = "const nat_ptr_t *default_scanMap[] = { \n"
# Iterate over triggerList and generate a C trigger array for the default map and default map array
for index, trigger_list in enumerate(trigger_lists[0][min_scan_code[0]:]):
trigger_list_len = 0
if trigger_list is not None:
trigger_list_len = len(trigger_list)
# Generate ScanCode index and triggerList length
self.fill_dict['DefaultLayerTriggerList'] += "Define_TL( default, 0x{0:02X} ) = {{ {1}".format(
index,
trigger_list_len
)
# Add scanCode trigger list to Default Layer Scan Map
self.fill_dict['DefaultLayerScanMap'] += "default_tl_0x{0:02X}, ".format(index)
# Add each item of the trigger list
if trigger_list_len > 0:
for trigger_code in trigger_list:
self.fill_dict['DefaultLayerTriggerList'] += ", {0}".format(trigger_code)
self.fill_dict['DefaultLayerTriggerList'] += " };\n"
self.fill_dict['DefaultLayerTriggerList'] = self.fill_dict['DefaultLayerTriggerList'][:-1] # Remove last newline
self.fill_dict['DefaultLayerScanMap'] = self.fill_dict['DefaultLayerScanMap'][:-2] # Remove last comma and space
self.fill_dict['DefaultLayerScanMap'] += "\n};"
## Partial Layers and Partial Layer Scan Maps ##
self.fill_dict['PartialLayerTriggerLists'] = ""
self.fill_dict['PartialLayerScanMaps'] = ""
# Iterate over each of the layers, excluding the default layer
for lay_index, layer in enumerate(trigger_lists):
# Skip first layer (already done)
if lay_index == 0:
continue
# Prepare each layer
self.fill_dict['PartialLayerScanMaps'] += "// Partial Layer {0}\n".format(lay_index)
self.fill_dict['PartialLayerScanMaps'] += "const nat_ptr_t *layer{0}_scanMap[] = {{ \n".format(lay_index)
self.fill_dict['PartialLayerTriggerLists'] += "// Partial Layer {0}\n".format(lay_index)
# Iterate over triggerList and generate a C trigger array for the layer
for trig_index, trigger_list in enumerate(layer[min_scan_code[lay_index]:max_scan_code[lay_index] + 1]):
# Generate ScanCode index and layer
self.fill_dict['PartialLayerTriggerLists'] += \
"Define_TL( layer{0}, 0x{1:02X} ) = {{".format(
lay_index,
trig_index,
)
# TriggerList length
if trigger_list is not None:
self.fill_dict['PartialLayerTriggerLists'] += " {0}".format(
len(trigger_list)
)
# Blank trigger (Dropped), zero length
else:
self.fill_dict['PartialLayerTriggerLists'] += " 0"
# Add scanCode trigger list to Default Layer Scan Map
self.fill_dict['PartialLayerScanMaps'] += "layer{0}_tl_0x{1:02X}, ".format(
lay_index,
trig_index,
)
# Add each item of the trigger list
if trigger_list is not None:
for trigger_code in trigger_list:
self.fill_dict['PartialLayerTriggerLists'] += ", {0}".format(
trigger_code
)
self.fill_dict['PartialLayerTriggerLists'] += " };\n"
self.fill_dict['PartialLayerTriggerLists'] += "\n"
self.fill_dict['PartialLayerScanMaps'] = self.fill_dict['PartialLayerScanMaps'][:-2] # Remove last comma and space
self.fill_dict['PartialLayerScanMaps'] += "\n};\n\n"
self.fill_dict['PartialLayerTriggerLists'] = self.fill_dict['PartialLayerTriggerLists'][:-2] # Remove last 2 newlines
self.fill_dict['PartialLayerScanMaps'] = self.fill_dict['PartialLayerScanMaps'][:-2] # Remove last 2 newlines
## Layer Index List ##
self.fill_dict['LayerIndexList'] = "const Layer LayerIndex[] = {\n"
# Iterate over each layer, adding it to the list
for layer, layer_context in enumerate(reduced_contexts):
# Generate stacked name (ignore capabilities.kll and scancode_map.kll)
stack_name = ""
for name in layer_context.files():
if name not in ["capabilities.kll", "scancode_map.kll"]:
stack_name += "{0} + ".format(name)
# Apply default name if using standard layout
if stack_name == "":
stack_name = "StandardLayer"
else:
stack_name = stack_name[:-3]
# Default map is a special case, always the first index
if layer == 0:
self.fill_dict['LayerIndexList'] += '\tLayer_IN( default_scanMap, "D: {1}", 0x{0:02X} ),\n'.format(min_scan_code[layer], stack_name)
else:
self.fill_dict['LayerIndexList'] += '\tLayer_IN( layer{0}_scanMap, "{0}: {2}", 0x{1:02X} ),\n'.format(layer, min_scan_code[layer], stack_name)
self.fill_dict['LayerIndexList'] += "};"
## Layer State ##
self.fill_dict['LayerState'] = "LayerStateType LayerState[ LayerNum ];"
## Layers JSON ##
# Layer 0 is the default map
# Layer 1+ are the partial maps
for layer, layer_context in enumerate(reduced_contexts):
layer_info = dict()
for key, mapped_trigger in sorted(layer_context.organization.mapping_data.data.items()):
layer_info[key] = {
'trigger' : mapped_trigger[0].triggersSequenceOfCombosOfIds(),
'result' : mapped_trigger[0].resultsSequenceOfCombosOfIds(),
'kll' : mapped_trigger[0].kllify()
}
layers_json[layer] = layer_info
## PixelId Physical Positions ##
for key, entry in sorted(pixel_positions.items()):
# Add physical pixel positions and ScanCode (if available) to json
pixel_id_json.setdefault(key, dict()).update(entry)
## ScanCode Physical Positions ##
for key, entry in sorted(scancode_positions.items()):
# Add physical scancode positions and PixelId (if available) to json
scancode_json.setdefault(key, dict()).update(entry)
## Rotation Trigger Parameters
max_rotations = 0
if rotation_map.keys():
max_rotations = max(rotation_map.keys())
self.fill_dict['RotationParameters'] = 'const uint8_t Rotation_MaxParameter[] = {\n'
for key, entry in sorted(rotation_map.items()):
self.fill_dict['RotationParameters'] += '\t{}, // {}\n'.format(
entry,
key,
)
self.fill_dict['RotationParameters'] += '};'
## Pixel Buffer Setup ##
# Only add sections if Pixel Buffer is defined
self.use_pixel_map = 'Pixel_Buffer_Size' in defines.data.keys()
self.fill_dict['AnimationList'] = ""
if self.use_pixel_map:
self.fill_dict['PixelBufferSetup'] = "PixelBuf Pixel_Buffers[] = {\n"
# Lookup number of buffers
bufsize = len(variables.data[defines.data['Pixel_Buffer_Size'].name].value)
for index in range(bufsize):
self.fill_dict['PixelBufferSetup'] += "\tPixelBufElem( {0}, {1}, {2}, {3} ),\n".format(
variables.data[defines.data['Pixel_Buffer_Length'].name].value[index],
variables.data[defines.data['Pixel_Buffer_Width'].name].value[index],
variables.data[defines.data['Pixel_Buffer_Size'].name].value[index],
variables.data[defines.data['Pixel_Buffer_Buffer'].name].value[index],
)
self.fill_dict['PixelBufferSetup'] += "};"
# Compute total number of channels
totalchannels = "{0} + {1}".format(
variables.data[defines.data['Pixel_Buffer_Length'].name].value[bufsize - 1],
variables.data[defines.data['Pixel_Buffer_Size'].name].value[bufsize - 1],
)
# Only include if defined
# XXX (HaaTa) This has to be done to make sure KLL compiler is still compatible with older KLL files
if 'LED_Buffer_Size' in variables.data.keys():
self.fill_dict['PixelBufferSetup'] += "\nPixelBuf LED_Buffers[] = {\n"
# Lookup number of buffers (LED)
ledbufsize = len(variables.data[defines.data['LED_Buffer_Size'].name].value)
for index in range(ledbufsize):
self.fill_dict['PixelBufferSetup'] += "\tPixelBufElem( {0}, {1}, {2}, {3} ),\n".format(
variables.data[defines.data['LED_Buffer_Length'].name].value[index],
variables.data[defines.data['LED_Buffer_Width'].name].value[index],
variables.data[defines.data['LED_Buffer_Size'].name].value[index],
variables.data[defines.data['LED_Buffer_Buffer'].name].value[index],
)
self.fill_dict['PixelBufferSetup'] += "};"
# Add LED fade group(s)
self.fill_dict['PixelFadeConfig'] = ""
ledgroupsize = len(variables.data[defines.data['KLL_LED_FadeGroup'].name].value)
for index in range(ledgroupsize):
self.fill_dict['PixelFadeConfig'] += "const uint16_t Pixel_LED_DefaultFadeGroup{}[] = {{\n".format(
index
)
data = variables.data[defines.data['KLL_LED_FadeGroup'].name].value[index]
if data != "":
self.fill_dict['PixelFadeConfig'] += "\t{}\n".format(data)
self.fill_dict['PixelFadeConfig'] += "};\n"
self.fill_dict['PixelFadeConfig'] += "const PixelLEDGroupEntry Pixel_LED_DefaultFadeGroups[] = {\n"
for index in range(ledgroupsize):
# Count number of elements
data = variables.data[defines.data['KLL_LED_FadeGroup'].name].value[index]
count = len(data.split(','))
if data == "":
count = 0
self.fill_dict['PixelFadeConfig'] += "\t{{ {}, Pixel_LED_DefaultFadeGroup{} }},\n".format(
count,
index,
)
self.fill_dict['PixelFadeConfig'] += "};\n"
# Add fade periods
self.fill_dict['PixelFadeConfig'] += "const PixelPeriodConfig Pixel_LED_FadePeriods[16] = {\n"
periodgroupsize = len(variables.data[defines.data['KLL_LED_FadePeriod'].name].value)
for index in range(periodgroupsize):
# Construct array
self.fill_dict['PixelFadeConfig'] += "\t{}, // {}\n".format(
variables.data[defines.data['KLL_LED_FadePeriod'].name].value[index],
index,
)
self.fill_dict['PixelFadeConfig'] += "};\n"
def fade_default_config(name):
fadeconfigsize = len(variables.data[defines.data[name].name].value)
self.fill_dict['PixelFadeConfig'] += "\t{ "
for index in range(fadeconfigsize):
self.fill_dict['PixelFadeConfig'] += "{}, ".format(
variables.data[defines.data[name].name].value[index]
)
self.fill_dict['PixelFadeConfig'] += "}}, // {}\n".format(name)
# Add fade configs
self.fill_dict['PixelFadeConfig'] += "const uint8_t Pixel_LED_FadePeriod_Defaults[4][4] = {\n"
fade_default_config('KLL_LED_FadeDefaultConfig0')
fade_default_config('KLL_LED_FadeDefaultConfig1')
fade_default_config('KLL_LED_FadeDefaultConfig2')
fade_default_config('KLL_LED_FadeDefaultConfig3')
self.fill_dict['PixelFadeConfig'] += "};"
# Compute total number of channels (LED)
totalchannels = "{0} + {1}".format(
variables.data[defines.data['LED_Buffer_Length'].name].value[ledbufsize - 1],
variables.data[defines.data['LED_Buffer_Size'].name].value[ledbufsize - 1],
)
## Pixel Mapping ##
## ScanCode to Pixel Mapping ##
pixel_indices = full_context.query('MapExpression', 'PixelChannel')
self.fill_dict['PixelMapping'] = "const PixelElement Pixel_Mapping[] = {\n"
self.fill_dict['ScanCodeToPixelMapping'] = "const uint16_t Pixel_ScanCodeToPixel[] = {\n"
self.fill_dict['ScanCodeToDisplayMapping'] = "const uint16_t Pixel_ScanCodeToDisplay[] = {\n"
# Add row, column of Pixel to json (mirror lookup to Scan Code Positions as well)
for y, elem in enumerate(pixel_display_mapping):
for x, pixelid in enumerate(elem):
entry = {'Row': y, 'Col': x}
pixel_uid = pixelid + 1
pixel_id_json.setdefault(pixel_uid, dict()).update(entry)
if 'ScanCode' in pixel_id_json[pixel_uid].keys():
scancode_uid = pixel_id_json[pixel_uid]['ScanCode']
scancode_json[scancode_uid].update(entry)
last_uid = 0
last_scancode = 0
for key, item in sorted(pixel_indices.data.items(), key=lambda x: x[1].pixel.uid.index):
last_uid += 1
last_scancode += 1
# If last_uid isn't directly before, insert placeholder(s)
while last_uid != item.pixel.uid.index:
self.fill_dict['PixelMapping'] += "\tPixel_Blank(), // {0}\n".format(last_uid)
last_uid += 1
# Lookup width and number of channels
width = item.pixel.channels[0].width
channels = len(item.pixel.channels)
self.fill_dict['PixelMapping'] += "\t{{ {0}, {1}, {{".format(width, channels)
# Iterate over the channels (assuming same width)
for ch in range(channels):
# Add comma if not first channel
if ch != 0:
self.fill_dict['PixelMapping'] += ","
self.fill_dict['PixelMapping'] += "{0}".format(item.pixel.channels[ch].uid)
self.fill_dict['PixelMapping'] += "}} }}, // {0}\n".format(key)
# Skip if not mapped to a scancode
if isinstance(item.position, list):
continue
# Add ScanCodeToPixelMapping entry
# Add ScanCodeToDisplayMapping entry
while item.position.uid != last_scancode and item.position.uid >= last_scancode:
# Fill in unused scancodes
self.fill_dict['ScanCodeToPixelMapping'] += "\t/*{0}*/ 0,\n".format(last_scancode)
self.fill_dict['ScanCodeToDisplayMapping'] += "\t/*__,__ {0}*/ 0,\n".format(last_scancode)
last_scancode += 1
self.fill_dict['ScanCodeToPixelMapping'] += "\t/*{0}*/ {1}, // {2}\n".format(
last_scancode,
item.pixel.uid.index,
key
)
# Find Pixel_DisplayMapping offset
offset_row = 0
offset_col = 0
offset = 0
for y_list in pixel_display_mapping:
for x_item in y_list:
if x_item == item.pixel.uid.index:
offset = offset_row * pixel_display_params['Columns'] + offset_col
break
offset_col += 1
# Offset found
if offset != 0:
break
offset_row += 1
offset_col = 0
self.fill_dict['ScanCodeToDisplayMapping'] += "\t/*{3: >2},{4: >2} {0}*/ {1}, // {2}\n".format(
last_scancode,
offset,
key,
offset_col,
offset_row,
)
totalpixels = last_uid
self.fill_dict['PixelMapping'] += "};"
self.fill_dict['ScanCodeToPixelMapping'] += "};"
self.fill_dict['ScanCodeToDisplayMapping'] += "};"
## Pixel Display Mapping ##
self.fill_dict['PixelDisplayMapping'] = "const uint16_t Pixel_DisplayMapping[] = {\n"
for y_list in pixel_display_mapping:
self.fill_dict['PixelDisplayMapping'] += \
",".join("{0: >3}".format(x) for x in y_list) + ",\n"
self.fill_dict['PixelDisplayMapping'] += "};"
## Animations ##
# TODO - Use reduced_contexts and generate per-layer (naming gets tricky)
# Currently using full_context which is not as configurable
self.fill_dict['Animations'] = "const uint8_t **Pixel_Animations[] = {"
self.fill_dict['AnimationSettings'] = "const AnimationStackElement Pixel_AnimationSettings[] = {"
self.fill_dict['AnimationList'] = ""
animations = full_context.query('DataAssociationExpression', 'Animation')
count = 0
for key, animation in sorted(animations.data.items()):
# Lookup uid
uid = animation_uid_lookup[animation.association.name]
# Name each frame collection
self.fill_dict['Animations'] += "\n\t/*{0}*/ {1}_frames,".format(
uid,
animation.association.name,
)
# Add animation name to list
animation_name = "Animation__{0}".format(
animation.association.name
)
self.fill_dict['AnimationList'] += "\n#define {0} {1}".format(
animation_name,
uid,
)
# Map index to name (json)
animation_id_json[animation.association.name] = uid
# Animation Settings Index JSON entry
animation_entry_json = animation.association.json()
animation_entry_json.update(animation.value.json())
animation_settings_index_json.append(animation_entry_json)
# Generate animation settings string entry
self.fill_dict['AnimationSettings'] += self.animation_settings_entry(
animation.value,
animation_name,
uid,
additional=False,
)
count += 1
self.fill_dict['Animations'] += "\n};"
# Additional Animation Settings
self.fill_dict['AnimationSettings'] += "\n\n\t/* Additional Settings */\n"
while count < len(animation_settings_list):
animation = animation_settings[animation_settings_list[count]]
animation_orig = animation_settings_orig[animation_settings_list[count]]
animation_name = "Animation__{0}".format(
animation.name
)
# Animation Settings JSON entry
animation_settings_json["{}".format(animation_orig)] = count
# Animation Settings Index JSON entry
animation_settings_index_json.append(animation.json())
# Generate animation settings string entry
self.fill_dict['AnimationSettings'] += self.animation_settings_entry(
animation,
animation_name,
count,
additional=True,
)
count += 1
self.fill_dict['AnimationSettings'] += "\n};"
## Animation Frames ##
# TODO - Use reduced_contexts and generate per-layer (naming gets tricky)
# Currently using full_context which is not as configurable
self.fill_dict['AnimationFrames'] = ""
animation_frames = full_context.query('DataAssociationExpression', 'AnimationFrame')
prev_aniframe_name = ""
prev_aniframe = 0
for key, aniframe in sorted(animation_frames.data.items(), key=lambda x: (x[1].association[0].name, x[1].association[0].index)):
aniframeid = aniframe.association[0]
aniframedata = aniframe.value
name = aniframeid.name
# Generate frame-set
if prev_aniframe_name != "" and name != prev_aniframe_name:
self.animation_frameset(prev_aniframe_name, prev_aniframe)
# Reset frame count
prev_aniframe = 0
# Fill in frames if necessary
while aniframeid.index > prev_aniframe + 1:
prev_aniframe += 1
self.fill_dict['AnimationFrames'] += "const uint8_t {0}_frame{1}[] = {{ PixelAddressType_End }};\n\n".format(
name,
prev_aniframe
)
prev_aniframe_name = name
# Address type lookup for frames
# See Macros/PixelMap/pixel.h for list of types
address_type = {
'PixelAddressId_Index': 'PixelAddressType_Index',
'PixelAddressId_Rect': 'PixelAddressType_Rect',
'PixelAddressId_ColumnFill': 'PixelAddressType_ColumnFill',
'PixelAddressId_RowFill': 'PixelAddressType_RowFill',
'PixelAddressId_ScanCode': 'PixelAddressType_ScanCode',
'PixelAddressId_RelativeRect': 'PixelAddressType_RelativeRect',
'PixelAddressId_RelativeColumnFill': 'PixelAddressType_RelativeColumnFill',
'PixelAddressId_RelativeRowFill': 'PixelAddressType_RelativeRowFill',
}
# Frame information
self.fill_dict['AnimationFrames'] += "// {0}".format(
aniframe.kllify()
)
# Generate frame
self.fill_dict['AnimationFrames'] += "\nconst uint8_t {0}_frame{1}[] = {{".format(
name,
aniframeid.index
)
# XXX (HaaTa) This is a bug, but for now this is ok
if len(aniframedata) == 1 and isinstance(aniframedata[0], list):
aniframedata = aniframedata[0]
for elem in aniframedata:
# TODO Determine widths (possibly do checks at an earlier stage to validate)
if isinstance(elem, list):
elem = elem[0]
# Select pixel address type
self.fill_dict['AnimationFrames'] += "\n\t{0},".format(
address_type[elem.uid.inferred_type()]
)
# For each channel select a pixel address
channels = elem.uid.uid_set()
channel_str = "/* UNKNOWN CHANNEL {0} */".format(len(channels))
if len(channels) == 1:
channel_str = " /*{0}*/{1},".format(
channels[0],
",".join(self.byte_split(channels[0], 4))
)
elif len(channels) == 2:
channel_str = ""
for index, ch in enumerate(channels):
value = 0
# Convert to pixelmap position as we defined a percentage
if isinstance(ch, float):
# Calculate percentage of displaymap
if index == 0:
value = (pixel_display_params['Columns'] - 1) * ch
elif index == 1:
value = (pixel_display_params['Rows'] - 1) * ch
value = int(round(value))
# No value, set to 0
elif ch is None:
value = 0
# Otherwise it's an integer
else:
value = int(ch)
channel_str += " /*{0}*/{1},".format(
ch, ",".join(self.byte_split(value, 2)),
)
self.fill_dict['AnimationFrames'] += channel_str
# For each channel, select an operator and value
for pixelmod in elem.modifiers:
# Set operator type
channel_str = " PixelChange_{0},".format(
pixelmod.operator_type()
)
# Set channel value
# TODO Support non-8bit values
channel_str += " {0},".format(pixelmod.value)
self.fill_dict['AnimationFrames'] += channel_str
self.fill_dict['AnimationFrames'] += "\n\tPixelAddressType_End\n};\n\n"
# Set frame number, for next frame evaluation
prev_aniframe = aniframeid.index
# Last frame set
if prev_aniframe_name != "":
self.animation_frameset(prev_aniframe_name, prev_aniframe)
## LED Buffer Struct ##
if 'LED_BufferStruct' in variables.data.keys():
self.fill_dict['LEDBufferStruct'] = variables.data['LED_BufferStruct'].value
else:
self.fill_dict['LEDBufferStruct'] = ""
## ScanCode Physical Positions ##
scancode_physical = full_context.query('DataAssociationExpression', 'ScanCodePosition')
self.fill_dict['KeyPositions'] = "const Position Key_Positions[] = {\n"
for key, item in sorted(scancode_physical.data.items(), key=lambda x: x[1].association[0].get_uid()):
entry = dict()
# Acquire each dimension
entry['x'] = item.association[0].x
entry['y'] = item.association[0].y
entry['z'] = item.association[0].z
entry['rx'] = item.association[0].rx
entry['ry'] = item.association[0].ry
entry['rz'] = item.association[0].rz
# Check each dimension, set to 0 if None
for k in entry.keys():
if entry[k] is None:
entry[k] = 0.0
else:
entry[k] = float(entry[k])
# Generate PositionEntry
self.fill_dict['KeyPositions'] += "\tPositionEntry( {0}, {1}, {2}, {3}, {4}, {5} ), // {6}\n".format(
entry['x'],
entry['y'],
entry['z'],
entry['rx'],
entry['ry'],
entry['rz'],
item,
)
self.fill_dict['KeyPositions'] += "};"
## KLL Defines ##
self.fill_dict['KLLDefines'] = ""
self.fill_dict['KLLDefines'] += "#define CapabilitiesNum_KLL {0}\n".format(len(self.capabilities_index))
self.fill_dict['KLLDefines'] += "#define LayerNum_KLL {0}\n".format(len(reduced_contexts))
self.fill_dict['KLLDefines'] += "#define ResultMacroNum_KLL {0}\n".format(len(result_index))
self.fill_dict['KLLDefines'] += "#define TriggerMacroNum_KLL {0}\n".format(len(trigger_index))
self.fill_dict['KLLDefines'] += "#define MaxScanCode_KLL {0}\n".format(max(max_scan_code))
self.fill_dict['KLLDefines'] += "#define RotationNum_KLL {0}\n".format(max_rotations)
# Only add defines if Pixel Buffer is defined
if self.use_pixel_map:
self.fill_dict['KLLDefines'] += "#define Pixel_BuffersLen_KLL {0}\n".format(bufsize)
self.fill_dict['KLLDefines'] += "#define Pixel_TotalChannels_KLL {0}\n".format(totalchannels)
self.fill_dict['KLLDefines'] += "#define Pixel_TotalPixels_KLL {0}\n".format(totalpixels)
self.fill_dict['KLLDefines'] += "#define Pixel_DisplayMapping_Cols_KLL {0}\n".format(
pixel_display_params['Columns']
)
self.fill_dict['KLLDefines'] += "#define Pixel_DisplayMapping_Rows_KLL {0}\n".format(
pixel_display_params['Rows']
)
self.fill_dict['KLLDefines'] += "#define Pixel_AnimationSettingsNum_KLL {0}\n".format(
len(animation_settings_list)
)
self.fill_dict['KLLDefines'] += "#define AnimationNum_KLL {0}\n".format(len(animations.data))
else:
self.fill_dict['KLLDefines'] += "#define AnimationNum_KLL 0\n"
## Define Validation ##
if 'stateWordSize' in variables.data.keys():
index_uint_t_size = int(variables.data['stateWordSize'].value)
total_index = max(len(trigger_index), len(result_index))
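# e.g. stateWordSize = 8 (illustrative) can index at most 2**8 = 256 trigger/result macros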
if total_index > 2 ** index_uint_t_size:
print("{} 'stateWordSize = {}' is not large enough! {} > {}".format(
ERROR,
index_uint_t_size,
total_index,
2 ** index_uint_t_size,
))
self.error_exit = True
## Generate USB HID Lookup ##
self.fill_dict['USBCDefineKeyboardMapping'] = ''
for pair in self.usb_c_defines[0]:
self.fill_dict['USBCDefineKeyboardMapping'] += "#define {} {}\n".format(*pair)
self.fill_dict['USBCDefineLEDMapping'] = ''
for pair in self.usb_c_defines[1]:
self.fill_dict['USBCDefineLEDMapping'] += "#define {} {}\n".format(*pair)
self.fill_dict['USBCDefineSystemControlMapping'] = ''
for pair in self.usb_c_defines[2]:
self.fill_dict['USBCDefineSystemControlMapping'] += "#define {} {}\n".format(*pair)
self.fill_dict['USBCDefineConsumerControlMapping'] = ''
for pair in self.usb_c_defines[3]:
self.fill_dict['USBCDefineConsumerControlMapping'] += "#define {} {}\n".format(*pair)
## Finish up JSON datastructures ##
# TODO Testing
# - Run trigger
# 1) Validate result (will need infra per capability)
# 2) Hook into animation testing?
# - Trigger Types
# 1) Switch
# 2) HID LED
# 3) Layer
# 4) Animation
# 5) Analog
self.json_dict['AnimationIds'] = animation_id_json
self.json_dict['AnimationSettings'] = animation_settings_json
self.json_dict['AnimationSettingsIndex'] = animation_settings_index_json
self.json_dict['PixelIds'] = pixel_id_json
self.json_dict['ScanCodes'] = scancode_json
self.json_dict['Capabilities'] = capabilities_json
self.json_dict['Defines'] = defines_json
self.json_dict['Layers'] = layers_json
self.json_dict['CodeLookup'] = self.code_to_capability
PK RKM kll/emitters/kll/__init__.pyPK RKM kll/emitters/kll/kll.py#!/usr/bin/env python3
'''
Re-Emits KLL files after processing. May do simplification.
'''
# Copyright (C) 2016-2018 by Jacob Alexander
#
# This file is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This file is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this file. If not, see .
### Imports ###
import os
from kll.common.emitter import Emitter, FileEmitter
### Decorators ###
# Print Decorator Variables
ERROR = '\033[5;1;31mERROR\033[0m:'
WARNING = '\033[5;1;33mWARNING\033[0m:'
### Classes ###
class KLL(Emitter, FileEmitter):
'''
Re-Emits KLL files, may simplify and re-order expressions.
'''
def __init__(self, control):
'''
Emitter initialization
@param control: ControlStage object, used to access data from other stages
'''
Emitter.__init__(self, control)
FileEmitter.__init__(self)
# Defaults
self.target_dir = "generated"
self.output_debug = False
self.kll_debug = False
def command_line_args(self, args):
'''
Group parser for command line arguments
@param args: Name space of processed arguments
'''
self.target_dir = args.target_dir
self.output_debug = args.output_debug
self.kll_debug = args.kll_debug
def command_line_flags(self, parser):
'''
Group parser for command line options
@param parser: argparse setup object
'''
# Create new option group
group = parser.add_argument_group('\033[1mKLL Emitter Configuration\033[0m')
group.add_argument('--target-dir', type=str, default=self.target_dir,
help="Target directory for generated files.\n"
"\033[1mDefault\033[0m: {0}\n".format(self.target_dir)
)
group.add_argument('--output-debug', action='store_true', default=self.output_debug,
help="Enable kll reconstitution in-file debug output.\n",
)
group.add_argument(
'--kll-debug',
action='store_true',
default=self.kll_debug,
help="Show debug info from kll emitter.",
)
def output(self):
'''
Final Stage of Emitter
Generate KLL files
'''
if self.kll_debug:
print("-- Generating --")
print(self.target_dir)
# Make sure output directory exists
os.makedirs(self.target_dir, exist_ok=True)
# Output list of files to disk
self.generate(self.target_dir)
def reconstitute_elem(self, elem, key):
'''
Re-constitute single element
May recurse if this is a list of elements
@param elem: Element to reconstitute
@param key: Identifier, used in debug output
@return: Re-constituted string
'''
# If the element is a list, iterate through it
if isinstance(elem, list):
output = ""
for index, subelem in enumerate(elem):
output += self.reconstitute_elem(subelem, "{0}[{1}]".format(key, index))
return output
# NOTE: Useful line when debugging issues
#print( type( elem ), elem )
# Otherwise format each element
if self.output_debug:
return "{0} # {1} # {2}\n".format(elem.kllify(), elem.regen_str(), key)
else:
return "{0}\n".format(elem.kllify())
def reconstitute_store(self, stores, name):
'''
Takes a list of organization stores and re-constitutes them into a kll file
@param stores: List of organization stores
@param name: Filename to call list of stores
@return: kll file contents
'''
output = ""
for store in stores:
# Show name of store
section_name = type(store).__name__
output += "# {0}\n".format(section_name)
# NOTE: Useful for debugging
#print( section_name )
# Sort by output string, rather than by key
for key, value in sorted(
store.data.items(),
key=lambda x: self.reconstitute_elem(x[1], x[0])
):
output += self.reconstitute_elem(value, key)
output += "\n"
self.output_files.append((name, output))
def process(self):
'''
Emitter Processing
Takes KLL datastructures and analysis results, then outputs them individually as kll files
'''
# Acquire Datastructures
early_contexts = self.control.stage('DataOrganizationStage').contexts
base_context = self.control.stage('DataFinalizationStage').base_context
default_context = self.control.stage('DataFinalizationStage').default_context
partial_contexts = self.control.stage('DataFinalizationStage').partial_contexts
full_context = self.control.stage('DataFinalizationStage').full_context
# Re-constitute KLL files using contexts of various stages
for key, context in early_contexts.items():
self.reconstitute_store(context.organization.stores(), "{0}.kll".format(key))
self.reconstitute_store(base_context.organization.stores(), "base.kll")
self.reconstitute_store(default_context.organization.stores(), "default.kll")
for index, partial in enumerate(partial_contexts):
self.reconstitute_store(partial.organization.stores(), "partial-{0}.kll".format(index))
self.reconstitute_store(full_context.organization.stores(), "final.kll")
PK RKM kll/emitters/none/__init__.pyPK RKMkQ
kll/emitters/none/none.py#!/usr/bin/env python3
'''
KLL Data Dropper (Doesn't emit anything)
'''
# Copyright (C) 2016-2018 by Jacob Alexander
#
# This file is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This file is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this file. If not, see .
### Imports ###
from kll.common.emitter import Emitter
### Decorators ###
# Print Decorator Variables
ERROR = '\033[5;1;31mERROR\033[0m:'
WARNING = '\033[5;1;33mWARNING\033[0m:'
### Classes ###
class Drop(Emitter):
'''
Doesn't emit at all, just ignores everything
'''
def __init__(self, control):
'''
Emitter initialization
@param control: ControlStage object, used to access data from other stages
'''
Emitter.__init__(self, control)
def command_line_args(self, args):
'''
Group parser for command line arguments
@param args: Name space of processed arguments
'''
def command_line_flags(self, parser):
'''
Group parser for command line options
@param parser: argparse setup object
'''
def output(self):
'''
Final Stage of Emitter
Nothing to do
'''
def process(self):
'''
Emitter Processing
Nothing to do, just dropping all the results
'''
PK RKM kll/examples/assignment.kllmyVariable = 1;
myArray[] = a b c "b c" 3;
myIndex[5] = "this text" thing; # Single element
myIndex[6] = moar;
myVariable => Variable_define;
myArray => Array_define;
myIndex => Index_define;
PK RKM|_K $ kll/examples/capabilitiesExample.kllName = ExampleModule;
Author = "HaaTa (Jacob Alexander) 2014";
KLL = 0.3;
myCapability2 => myFunc2();
myCapability3 => myFunc2( myArg1 : 2 );
myCapability => myFunc( myArg1 : 1, myArg2 : 4 );
usbKeyOut => Output_usbCodeSend_capability( usbCode : 1 );
PK RKMZ Z kll/examples/colemak.kllName = colemak;
Version = 0.1;
Author = "HaaTa (Jacob Alexander) 2014";
KLL = 0.3;
# Modified Date
Date = 2014-09-07;
# Top Row
'e' : 'f';
'r' : 'p';
't' : 'g';
'y' : 'j';
'u' : 'l';
'i' : 'u';
'o' : 'y';
'p' : ';';
# Middle Row
's' : 'r';
'd' : 's';
'f' : 't';
'g' : 'd';
'j' : 'n';
'k' : 'e';
'l' : 'i';
';' : 'o';
# Bottom Row
'n' : 'k';
PK RKMA# # " kll/examples/defaultMapExample.kllName = Kishsaver;
Version = 0.1;
Author = "HaaTa (Jacob Alexander) 2014";
KLL = 0.2d;
# Modified Date
Date = 2014-06-12;
S0x40 : U"Backspace";
S0x42 : U"]";
S0x43 : U"Delete";
S0x44 : U"Enter";
S0x46 : U"RShift";
S0x47 : U"RCtrl";
S0x48 : U"=";
S0x49 : U"-";
S0x4A : U"[";
S0x4B : U"\";
S0x4C : U"'";
S0x4D : U"/";
S0x4E : U"RGui";
S0x4F : U"RAlt";
S0x50 : U"0";
S0x51 : U"9";
S0x52 : U"P";
S0x53 : U"O";
S0x54 : U";";
S0x55 : U"L";
S0x56 : U".";
S0x57 : U",";
S0x58 : U"8";
S0x59 : U"7";
S0x5A : U"I";
S0x5B : U"U";
S0x5C : U"J";
S0x5D : U"K";
S0x5E : U"M";
S0x5F : U"N";
S0x60 : U"6";
S0x61 : U"5";
S0x62 : U"Y";
S0x63 : U"T";
S0x64 : U"H";
S0x65 : U"G";
S0x66 : U"B";
S0x67 : U"Space";
S0x68 : U"4";
S0x69 : U"3";
S0x6A : U"R";
S0x6B : U"E";
S0x6C : U"F";
S0x6D : U"D";
S0x6E : U"C";
S0x6F : U"V";
S0x70 : U"2";
S0x71 : U"Q";
S0x72 : U"W";
S0x73 : U"A";
S0x74 : U"S";
S0x75 : U"X";
S0x76 : U"Z";
S0x77 : U"LAlt";
S0x78 : U"1";
S0x79 : U"`";
S0x7A : U"Tab";
S0x7B : U"CapsLock";
S0x7C : U"LShift";
S0x7D : U"Inter1";
S0x7E : U"LGui";
S0x7F : U"LCtrl";
PK RKM3$s kll/examples/example.kllName = colemak;
Version = 0.1;
Author = "HaaTa (Jacob Alexander) 2014";
KLL = 0.3;
# Modified Date
Date = 2014-08-24;
test => myCFunc( dat : 1 );
U"A" : U"B";
# Top row
'e' : 'f';
'r' : 'p';
't' : 'g';
'y' : 'j';
'u' : 'l';
'i' : 'u';
'o' : 'y';
'p' : ';';
# Middle Row
's' : 'r';
'd' : 's';
'f' : 't';
'g' : 'd';
'j' : 'n';
'k' : 'e';
'l' : 'i';
';' : 'o';
# Bottom Row
'n' : 'k';
PK RKM5UB B kll/examples/example2.kll#
# Trigger : Result
U"CapsLock" : U"Ctrl";
# Combination trigger, outputs a sequence of characters
U"Ctrl" + U"Alt" + U"Delete" : 'Grr, Bill Gates...';
# Sequence of keys (like dead-keys), outputs a unicode character
# Unicode requires a driver (not ready yet)
U"Compose", U"A", U"1" : u'☃';
PK RKMt7 7 kll/examples/hhkbpro2.kllName = hhkbpro2;
Version = 0.1;
Author = "HaaTa (Jacob Alexander) 2014";
KLL = 0.3;
# Modified Date
Date = 2014-09-07;
# Number Row
U"1" : U"F1";
U"2" : U"F2";
U"3" : U"F3";
U"4" : U"F4";
U"5" : U"F5";
U"6" : U"F6";
U"7" : U"F7";
U"8" : U"F8";
U"9" : U"F9";
U"0" : U"F10";
U"-" : U"F11";
U"=" : U"F12";
U"Backslash" : U"Insert";
U"Backtick" : U"Delete";
# Top Row
U"Tab" : U"Capslock";
U"I" : U"PrintScreen";
U"O" : U"ScrollLock";
U"P" : U"Pause";
U"[" : U"Up";
# Middle Row
U"A" : U"VolumeDown";
U"S" : U"VolumeUp";
U"D" : U"Mute";
U"F" : CONS"Eject";
U"H" : U"Keypad Asterix";
U"J" : U"Keypad Slash";
U"K" : U"Home";
U"L" : U"PageUp";
U";" : U"Left";
U"Quote" : U"Right";
# Bottom Row
U"N" : U"Keypad Plus";
U"M" : U"Keypad Minus";
U"Comma" : U"End";
U"Period" : U"PageDown";
U"Slash" : U"Down";
# Space Row
# N/A
PK RKMS kll/examples/leds.kll# LED Syntax Test
Name = leds;
Author = "HaaTa (Jacob Alexander) 2016-2018";
KLL = 0.5;
mydefine = "stuffs here";
mydefine2 = '"stuffs here"'; # For outputting c define strings
mynumber = 414;
# Array variables
myarray[] = elem0 elem1 "elem 2";
myarray[4] = test;
# Key Positioning
S120 <= x:20, rx:15;
S121 <= x:20, y:10, z:2, rx:15, ry:12, rz:39;
S[122-125] <= x:20, rx:15;
S[122] <= z:14;
# Pixel Positioning
P19 <= x:21, rx:16;
P[20] <= x:20, rx:15;
P[21] <= x:20, y:10, z:2, rx:15, ry:12, rz:39;
P[22-25] <= x:20, rx:15;
P[22] <= z:13;
# Pixel Channel Mapping
P[5](4:8, 5:8, 12:8) : None;
P[4](3:8) : S0x31;
P[12](40:8, 50:8, 120:8) : S59;
P12(12:8, 13:8, 14:8) : S[40];
# Animation
A[BLEEdsing] <= loops:3,pos:2,ffunc:interp;
A[BLEEdsing2] <= start, framedelay:1, loop, replace:basic,pfunc:interp;
# Animation Frames
A[BLEEdsing, 0] <= P[4](+32);
A[BLEEdsing, 1] <= P[4](42);
A[BLEEdsing, 2] <= P[4](-12);
A[BLEEdsing, 3] <= P[4](-:32);
A[BLEEdsing, 4] <= P[4](+:400);
A[BLEEdsing, 5] <= P[4](<<2);
A[BLEEdsing, 6] <= P[4](>>1);
A[BLEEdsing, 7-9] <= P[4](+32);
A[BLEEdsing, 10, 12] <= P[4](+32);
A[BLEEdsing, 11, 13-15] <= P[4-10](+32);
A[BLEEdsing2, 0] <= PL[0](127, 30, 40), P[5](20, 30, 40);
A[BLEEdsing2, 1] <= P[1-20,40](40,50,0x60);
A[BLEEdsing2, 2] <= P[c:0](40,50,0x60);
A[BLEEdsing2, 3] <= P[c:10%](40,50,0x60);
A[BLEEdsing2, 4] <= P[r:10%,c:20](40,50,0x60);
A[BLEEdsing2, 5] <= P[r:i+10%,c:i-20](40,50,0x60);
A[BLEEdsing2, 6] <= P[r:i+10%](40,50,0x60);
A[BLEEdsing2, 7] <= U["A"](40,50,0x60);
A[BLEEdsing2, 8] <= U"B"(40,50,0x60);
A[BLEEdsing2, 9] <= S120(40,50,0x60);
A[BLEEdsing2, 10] <= S[0x10](40,50,0x60);
A[BLEEdsing2, 11] <= P[r:i](40,50,0x60);
A[BLEEdsing2, 12] <= P[c:i](40,50,0x60);
A[BLEEdsing2, 13] <= P[r:i,c:i](40,50,0x60);
A[RainbowFillInterp] <= start, pfunc:interp;
A[RainbowFillInterp, 1] <= P[c:20%](255,255,0), P[c:40%](255,0,0), P[c:60%](127,0,255), P[c:80%](0,0,255);
A[RainbowFillInterp, 2] <= P[c:0%](0,255,0), P[c:20%](255,255,0), P[c:40%](255,0,0), P[c:60%](127,0,255), P[c:80%](0,0,255);
# Animation Triggers
myCapability => myFunc( myArg1 : 1, myArg2 : 4 );
# XXX (HaaTa) Frame triggers no longer supported
#A[BLEEdsing, 3] : myCapability( 0x8, 0x25 );
#A[BLEEdsing, 4-6] : myCapability( 0x8, 0x25 );
#A[BLEEdsing, 7, 9] : myCapability( 0x8, 0x25 );
A[BLEEdsing2] : myCapability( 0x8, 0x25 );
#A[BLEEdsing2](D) : myCapability( 0x8, 0x25 );
#A[BLEEdsing2](R) : myCapability( 0x8, 0x25 );
# Animation Results
S[0x37, 0x38] : A"BLEEdsing";
S0x39 : A"BLEEdsing", A"BLEEdsing2";
S0x40 : A[BLEEdsing];
S0x41 : A[BLEEdsing](loops:3);
S0x42 : A[BLEEdsing](loops:2,framedelay:3,framestretch);
S0x43 : PL[0](0xFF,0xFF,244) + P[1-3](20,40,60);
S0x44 : PL[0-2](0xFF,0xFF,244);
S0x44 : PL1(0xFF,0xFF,244);
S0x45 : PL[2](0x1F,0x2F,0x3F);
S0x46 : P[0](11,23,45);
S0x47 : P1(11,23,45);
PK RKMH H kll/examples/leds2.kll# TEMP TEST, REMOVEME
A[BLEEdsing2] <= start, div:1, loop, replace:0,interp:on;
# Animation Frames
A[BLEEdsing, 0] <= P[4](+32);
A[BLEEdsing, 1] <= P[4](42);
A[BLEEdsing2, 1] <= P[1-20,40](40,50,0x60);
A[BLEEdsing2, 2] <= P[c:0](40,50,0x60);
A[BLEEdsing2, 3] <= P[c:10%](40,50,0x60);
A[BLEEdsing2, 4] <= P[r:10%,c:20](40,50,0x60);
A[BLEEdsing2, 5] <= P[r:i+10%,c:i-20](40,50,0x60);
A[BLEEdsing2, 6] <= P[r:i+10%](40,50,0x60);
A[BLEEdsing2, 7] <= U["A"](40,50,0x60);
A[BLEEdsing2, 8] <= U"B"(40,50,0x60);
A[BLEEdsing2, 9] <= S120(40,50,0x60);
A[BLEEdsing2, 10] <= S[0x10](40,50,0x60);
A[RainbowFillInterp] <= toggle, interp:on;
A[RainbowFillInterp, 1] <= P[c:20%](255,255,0), P[c:40%](255,0,0), P[c:60%](127,0,255), P[c:80%](0,0,255);
A[RainbowFillInterp, 2] <= P[c:0%](0,255,0), P[c:20%](255,255,0), P[c:40%](255,0,0), P[c:60%](127,0,255), P[c:80%](0,0,255);
P[22-25] <= x:20, rx:15;
P[22] <= z:13;
S[122-125] <= x:20, rx:15;
S[122] <= z:14;
S0x47 : P[1](11,23,45);
S0x47 : P1(11,23,45);
P[12](40:8, 50:8, 120:8) : S59;
P11(40:8, 50:8, 120:8) : S54;
S0x42 : A[BLEEdsing](loop:2,div:3);
PK RKM_LY kll/examples/mapping.kllName = mapping test;
Author = "HaaTa (Jacob Alexander) 2016";
KLL = 0.5b;
## Mapping Operators ##
S0x01 : U"A";
S0x01 : U"A";
# Must include file twice for this case to work
S0x02 : U"B";
U"B" :: U"C";
S0x03 :+ U"D";
S0x04 : U"E";
S0x04 :+ U"F";
S0x04 :+ U"F";
S0x04 :+ U"L";
S0x06 : U"G";
S0x06 :+ U"H";
S0x06 :- U"G";
S0x07 + S0x08 : U"H";
S0x09, S0x0A : U"I";
S[0x0B, 0x0B, 0x0C] : U"J";
S0x0D :- U"K";
## Isolation Mappings ##
S0x10 i: U"M";
S0x10 i: U"M";
# Must include file twice for this case to work
S0x11 i: U"N";
U"N" i:: U"O";
S0x12 i:+ U"P";
S0x13 i: U"Q";
S0x13 i:+ U"R";
S0x14 i: U"S";
S0x14 i:+ U"T";
S0x14 i:- U"S";
S0x15 + S0x16 i: U"U";
S0x17, S0x18 i: U"V";
S[0x19, 0x1A] i: U"W";
S0x1B i:- U"X";
PK RKMX kll/examples/md1Map.kllName = MD1;
Version = 0.1;
Author = "HaaTa (Jacob Alexander) 2014";
KLL = 0.3;
# Modified Date
Date = 2014-09-07;
# MOVE THIS SECTION to another file
usbKeyOut => Output_usbCodeSend_capability( usbCode : 1 );
layerState => Macro_layerState_capability( layer : 2, state : 1 );
layerLatch => Macro_layerLatch_capability( layer : 2 );
layerLock => Macro_layerLock_capability( layer : 2 );
layerShift => Macro_layerShift_capability( layer : 2 );
# END SECTION
S0x00 : U"Esc";
S0x01 : U"1";
S0x02 : U"2";
S0x03 : U"3";
S0x04 : U"4";
S0x05 : U"5";
S0x06 : U"6";
S0x07 : U"7";
S0x08 : U"8";
S0x09 : U"9";
S0x0A : U"0";
S0x0B : U"Minus";
S0x0C : U"Equal";
S0x0D : U"Backslash";
S0x0E : U"Tab";
S0x0F : U"Q";
S0x10 : U"W";
S0x11 : U"E";
S0x12 : U"R";
S0x13 : U"T";
S0x14 : U"Y";
S0x15 : U"U";
S0x16 : U"I";
S0x17 : U"O";
S0x18 : U"P";
S0x19 : U"LBrace";
S0x1A : U"RBrace";
S0x1B : U"Backspace";
S0x1C : U"Ctrl";
S0x1D : U"A";
S0x1E : U"S";
S0x1F : U"D";
S0x20 : U"F";
S0x21 : U"G";
S0x22 : U"H";
S0x23 : U"J";
S0x24 : U"K";
S0x25 : U"L";
S0x26 : U"Semicolon";
S0x27 : U"Quote";
S0x28 : U"Enter";
S0x29 : U"LShift";
S0x2A : U"Z";
S0x2B : U"X";
S0x2C : U"C";
S0x2D : U"V";
S0x2E : U"B";
S0x2F : U"N";
S0x30 : U"M";
S0x31 : U"Comma";
S0x32 : U"Period";
S0x33 : U"Slash";
S0x34 : U"RShift";
S0x35 : U"Function1"; # Fun key
S0x36 : U"Function2"; # Left Blank Key
S0x37 : U"LAlt";
S0x38 : U"LGui";
S0x39 : U"Space";
S0x3A : U"RGui";
S0x3B : U"RAlt";
S0x3C : U"Function3"; # Right Blank Key 1
S0x3D : U"Function4"; # Right Blank Key 2
S0x3E : U"BackTick";
# TODO MOVE
# Function Layer Assignments
U"Function1" : layerShift( 1 );
U"Function2" : layerShift( 1 );
U"Function3" : layerShift( 1 );
U"Function4" : layerShift( 1 );
PK RKMvi i kll/examples/nonetest.kll# None Syntax Test
Name = None;
Author = "HaaTa (Jacob Alexander) 2016-2017";
KLL = 0.5;
U"A" : None;
PK RKMl&p kll/examples/simple1.kllName = colemak;
Author = "HaaTa (Jacob Alexander) 2014";
KLL = 0.3;
myCapability2 => myFunc2();
myCapability3 => myFunc3( myArg1 : 2 );
myCapability => myFunc( myArg1 : 1, myArg2 : 4 );
usbKeyOut => Output_usbCodeSend_capability( usbCode : 1 );
S0x3 : myCapability2();
S0x4 : myCapability2() + myCapability3(0x5);
S0x5 : myCapability2(), myCapability3(0x6);
S0x7 : myCapability3( 0x8 );
S0x8 : myCapability( 0x2, 0x4 );
S0x6 : 'abcdDfF';
S0x6 : 'abcdDfF', U0xb;
S0x6 : 'abcdDfF', U0xb, U[0x1-0x4,0xa];
S0x40 : U[0x1];
S0x40 : U[0x1,0x2];
S0x40 : U[0x1-0x4];
S0x40 : U[0x1-0x4,0xa];
S0x40 : U[0x1-0x4,0xa] + U0x5;
S0x40 : U[0x1-0x4,0xa] + U[0x5-0x20];
S0x40 : U[0x1-0x4,0xa] + U[0x5-0x20], U33;
S0x40 : U0x01, U2;
S0x40 : U0x01, U2+U3;
S0x40 : U0x01, U2+U"Esc";
S0x0B : U["A"-"5"];
S0x0B : U["Esc"];
S0x46 : U0x01, U2+U["2"-"5"];
S0x40 : U0x01, U2+U["Esc"];
S0x40 : U0x01, U2+U["Esc", "A", "C"-"F"];
S0x40 : U"Backspace";
S0x1+S0x2 : U"2";
S0x3,S0x4 : U"1";
S0x1+S0x2,S0x3,S0x4 : U"3";
S[ 0x5 ] : U"4";
S[ 0x5, 0x6 ] : U"5";
S[ 0x5, 0x6, 0x7, 0x8 ] : U"5";
S[ 0x7 - 0x9 ] : U"6";
S[ 0x2 - 0x9, 0x10 ] : U"r";
S[ 0x2 - 0x9, 0x10 ]+S[0x5 - 0x6, 0x9],S0xA+S0xB : U"r";
S0x42 : U"]";
S0x42 : U"Esc";
PK RKMI4 kll/examples/simple2.kll#
Name = colemak;
Author = "HaaTa (Jacob Alexander) 2014";
KLL = 0.3;
mydefine = "stuffs here";
mydefine2 = '"stuffs here"'; # For outputting c define strings
mynumber = 414;
mydefine => myCdef;
mydefine2 => myCdef2;
mydefine3 => myCdef3;
mynumber => myCnumber;
usbKeyOut => Output_usbCodeSend_capability( usbCode : 1 );
consCtrlOut => Output_consCtrlSend_capability( consCode : 2 );
noneOut => Output_noneSend_capability();
sysCtrlOut => Output_sysCtrlSend_capability( sysCode : 1 );
myCapability2 => myFunc2();
myCapability3 => myFunc3( myArg1 : 2 );
myCapability => myFunc( myArg1 : 1, myArg2 : 4 );
S0x3 : myCapability2();
S[0x4] : myCapability( 0x8, 0x25 );
S[ 0x7 - 0x9 ] : U"6";
S0x40 : U[0x1-0x4];
S0x12 : U[122] + U[123];
S0x6 : 'abcdDfF';
S0x40 : U[0x1];
S0x0B : U["Esc"];
S0x0B :+ U["Q"];
S[ 0x7 - 0x9 ], S[0x2,0x3] : U"6";
S[ 0x2 - 0x9, 0x10 ] :+ U"r";
S0x0B :- U["Esc"];
S[ 0x3 - 0x4 ] + S[ 0x10 ], S[ 0x20 ] : U"Enter";
S127 + S128 : U"0";
S0x41 : CONS[0x30];
S0x42 : CONS["Play"];
S0x43 : CONS0x31;
S0x45 : SYS[0xA0];
S0x46 : SYS["UnDock"];
S0x47 : SYS0xA2;
S[0x48] : None;
S0x30(P) : U"A";
S0x29(P:10ms) : U"A";
S0x28(20) : U"A";
S0x31(H:20ms, R:1s) : U"B";
S0x32(P,H,R) : U"B";
PK RKMRl kll/examples/simpleExample.kllName = "Simple Example";
Author = "HaaTa (Jacob Alexander) 2014-2015";
KLL = 0.3a;
usbKeyOut => Output_usbCodeSend_capability( usbCode : 1 );
S0x40 : U0x43;
S0x40 : U"Backspace";
S0x42 : U"]";
S0x42 : U"Esc";
PK RKMWJ ! kll/examples/state_scheduling.kllName = "State Scheduling";
Author = "HaaTa (Jacob Alexander) 2016";
KLL = 0.4;
mydefine = "stuffs here";
mydefine2 = '"stuffs here"'; # For outputting c define strings
mynumber = 414;
# State Scheduling
S0x43 : U"Enter";
S[0x43(P,UP,UR)] : U"Enter";
S0x44(P) : U"Enter";
S0x45(UP) : U"Enter";
S0x46(UR) : U"Enter";
S0x46(R) : U"Enter";
S0x47(H) + S0x48 : U"Enter";
S0x49(O) + S0x50 : U"Enter";
# Timing Triggers
U"t"(300ms) : 'duuude';
U"t"(30.2ms) : 'duuude';
U"i"(200) : 'duuude1';
U"u"(1s) : 'duuud2e';
U"m"(40us) : 'duuu3de';
U"a" + U"b"(P:1s) : 'slow';
U"a" + U"b"(P:50ms,H:100ms,R:200ms) : 'fast';
# Timing Results
U"x" : U"a"(300ms);
U"v" : U"a"(P,H:300ms,R);
# Analog
S0x2A(10) : U"B";
S0x2A(80) : U"C";
S[34-52](22) : 'boo';
S[34-52(88)](22) : 'beh';
S[34-52(88), 78](30) : 'joe';
U"A"(0) : U"A"; # Pulse
U"A"(42) : U"Q";
U["1"-"5"(42), "Tab"](30) : 'mac';
# Indicators
I"NumLock" : U"Space";
I"NumLock"(A) : U"Space";
I"NumLock"(D) : U"Z";
I2 : U"G"; # CapsLock
U"a" + I"NumLock"(Off) : U"Q";
U"a" + I"NumLock"(On) : U"W";
PK RKM kll/examples/triggers.kll# Various triggers
Name = triggers;
Author = "HaaTa (Jacob Alexander) 2018";
KLL = 0.5;
# Scancode Trigger
S0x2 : U"A";
# USB Code Trigger
U"Z" : U"B";
# Indicator Light Trigger
I"NumLock" : U"C";
I"CapsLock" : U"D";
I["ScrollLock"] : U"E";
I0x04 : U"Q";
# Layer Trigger (new syntax)
Layer[1] : U"F";
Layer[1-3] : U"R";
Layer[2,3] : U"S";
LayerShift[2] : U"G";
LayerLock[2] : U"H";
LayerLatch[2] : U"I";
U"J" : Layer[1]; # Functions as LayerShift[1]
U"T" : Layer[1-3]; # Functions as LayerShift[1-3]
U"U" : Layer[2,3]; # Functions as LayerShift[2,3]
U"K" : LayerShift[1];
U"L" : LayerLock[1];
U"M" : LayerLatch[1];
# Animation Trigger
A[MyAnimation] <= start;
A[MyAnimation] : U"N";
#A[MyAnimation](D) : U"O";
#A[MyAnimation](R) : U"P";
# Generic Triggers (all uid 0)
T[0,0] : U"A"; # Switch Bank 1
T[4,0] : U"B"; # LED Bank 1
T[5,0] : U"C"; # Analog Bank 1
T[9,0] : U"D"; # Layer Bank 1
T[13,0] : U"E"; # Animation Bank 1
T[17,0] : U"F"; # Sleep Bank 1
T[18,40] : U"G"; # Inactive Bank 1, after 40 seconds
T[0,0] + T[0,1], T[0,2] : U"H";
T[0,0] + T[0,1] : U"I";
T[0,0], T[0,1] : U"J";
# Rotations
T[20,0](0) : U"A";
T[20,0](1) : U"B";
T[20,0](2) : U"C";
T[20,1](0) : U"1";
T[20,1](5) : U"2";
T[20,1](10) : U"3";
U"Y" : rotate(0, 1);
U"X" : rotate(0, -1);
U"W" : rotate(1, 1);
U"V" : rotate(1, -1);
PK RKMBC C ( kll/examples/locale/base.locale-test.kll# Base KLL file used in locale tests
Name = base.locale-test;
Version = 0.1;
Author = "HaaTa (Jacob Alexander) 2018";
KLL = 0.5;
HIDMapping = default;
# Modified Date
Date = 2018-06-04;
# Adapted from an early Kira mapping
S1 : U"Esc";
S2 : U"F1";
S3 : U"F2";
S4 : U"F3";
S5 : U"F4";
S6 : U"F5";
S7 : U"F6";
S8 : U"F7";
S9 : U"F8";
S10 : U"F9";
S11 : U"F10";
S12 : U"F11";
S13 : U"F12";
S14 : U"PrintScreen";
S15 : U"Delete";
S16 : U"Home";
S17 : U"End";
S18 : U"PageUp";
S19 : U"PageDown";
S20 : U"Backtick";
S21 : U"1";
S22 : U"2";
S23 : U"3";
S24 : U"4";
S25 : U"5";
S26 : U"6";
S27 : U"7";
S28 : U"8";
S29 : U"9";
S30 : U"0";
S31 : U"Minus";
S32 : U"Equals";
S33 : U"Backspace";
S34 : U"NumLock";
S35 : U"P/";
S36 : U"P*";
S37 : U"P-";
S38 : U"Tab";
S39 : U"Q";
S40 : U"W";
S41 : U"E";
S42 : U"R";
S43 : U"T";
S44 : U"Y";
S45 : U"U";
S46 : U"I";
S47 : U"O";
S48 : U"P";
S49 : U"LBrace";
S50 : U"RBrace";
S51 : U"Backslash";
S52 : U"P7";
S53 : U"P8";
S54 : U"P9";
S55 : U"P+";
S56 : U"CapsLock";
S57 : U"A";
S58 : U"S";
S59 : U"D";
S60 : U"F";
S61 : U"G";
S62 : U"H";
S63 : U"J";
S64 : U"K";
S65 : U"L";
S66 : U"Semicolon";
S67 : U"Quote";
S68 : U"Enter";
S69 : U"P4";
S70 : U"P5";
S71 : U"P6";
S72 : U"LShift";
S73 : U"Z";
S74 : U"X";
S75 : U"C";
S76 : U"V";
S77 : U"B";
S78 : U"N";
S79 : U"M";
S80 : U"Comma";
S81 : U"Period";
S82 : U"Slash";
S83 : U"RShift";
S84 : U"Up";
S85 : U"P1";
S86 : U"P2";
S87 : U"P3";
S88 : U"PEnter";
S89 : U"LCtrl";
S90 : U"LGui";
S91 : U"LAlt";
S92 : U"Space";
S93 : U"RAlt";
S94 : U"RCtrl";
S95 : U"Left";
S96 : U"Down";
S97 : U"Right";
S98 : U"P0";
S99 : U"P.";
PK RKMz= = ) kll/examples/locale/de_DE.locale-test.kll# Test Layout Using a de_DE locale
Name = de_DE-Test;
Version = 0.1;
Author = "HaaTa (Jacob Alexander) 2018";
KLL = 0.5;
HIDMapping = de_DE;
# Modified Date
Date = 2018-06-04;
# Remap Y to Z and Z to Y
# Then check that:
# S44 -> 0x1D (US Z, DE Y)
# S73 -> 0x1C (US Y, DE Z)
# Will be valid
U"Y" : U"Z";
U"Z" : U"Y";
PK RKMLN kll/extern/README.md# External Dependencies
These are dependencies that are not specific to the kll compiler.
* [funcparserlib](funcparserlib) - Copy of funcparserlib with a few debugging enhancements.
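As a rough sketch (module paths follow this directory layout; the actual call sites inside the compiler are not shown here), the vendored copy is imported like any other package:

    from kll.extern.funcparserlib.lexer import make_tokenizer
    from kll.extern.funcparserlib.parser import some, many, skip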
PK RKM $ kll/extern/funcparserlib/__init__.pyPK RKM< < ! kll/extern/funcparserlib/lexer.py# -*- coding: utf-8 -*-
# Copyright (c) 2008/2013 Andrey Vlasovskikh
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__all__ = ['make_tokenizer', 'Token', 'LexerError']
import re
class LexerError(Exception):
def __init__(self, place, msg):
self.place = place
self.msg = msg
def __str__(self):
s = 'cannot tokenize data'
line, pos = self.place
return '%s: %d,%d: "%s"' % (s, line, pos, self.msg)
class Token(object):
def __init__(self, type, value, start=None, end=None):
self.type = type
self.value = value
self.start = start
self.end = end
def __repr__(self):
return 'Token(%r, %r)' % (self.type, self.value)
def __eq__(self, other):
# FIXME: Case sensitivity is assumed here
return self.type == other.type and self.value == other.value
def _pos_str(self):
if self.start is None or self.end is None:
return ''
else:
sl, sp = self.start
el, ep = self.end
return '%d,%d-%d,%d:' % (sl, sp, el, ep)
def __str__(self):
s = "%s %s '%s'" % (self._pos_str(), self.type, self.value)
return s.strip()
@property
def name(self):
return self.value
def pformat(self):
return "%s %s '%s'" % (self._pos_str().ljust(20),
self.type.ljust(14),
self.value)
def make_tokenizer(specs):
"""[(str, (str, int?))] -> (str -> Iterable(Token))"""
def compile_spec(spec):
name, args = spec
return name, re.compile(*args)
compiled = [compile_spec(s) for s in specs]
def match_specs(specs, str, i, position):
line, pos = position
for type, regexp in specs:
m = regexp.match(str, i)
if m is not None:
value = m.group()
nls = value.count('\n')
n_line = line + nls
if nls == 0:
n_pos = pos + len(value)
else:
n_pos = len(value) - value.rfind('\n') - 1
return Token(type, value, (line, pos + 1), (n_line, n_pos))
else:
errline = str.splitlines()[line - 1]
raise LexerError((line, pos + 1), errline)
def f(str):
length = len(str)
line, pos = 1, 0
i = 0
while i < length:
t = match_specs(compiled, str, i, (line, pos))
yield t
line, pos = t.end
i += len(t.value)
return f
# This is an example of a token spec. See also [this article][1] for a
# discussion of searching for multiline comments using regexps (including `*?`).
#
# [1]: http://ostermiller.org/findcomment.html
_example_token_specs = [
('COMMENT', (r'\(\*(.|[\r\n])*?\*\)', re.MULTILINE)),
('COMMENT', (r'\{(.|[\r\n])*?\}', re.MULTILINE)),
('COMMENT', (r'//.*',)),
('NL', (r'[\r\n]+',)),
('SPACE', (r'[ \t\r\n]+',)),
('NAME', (r'[A-Za-z_][A-Za-z_0-9]*',)),
('REAL', (r'[0-9]+\.[0-9]*([Ee][+\-]?[0-9]+)*',)),
('INT', (r'[0-9]+',)),
('INT', (r'\$[0-9A-Fa-f]+',)),
('OP', (r'(\.\.)|(<>)|(<=)|(>=)|(:=)|[;,=\(\):\[\]\.+\-<>\*/@\^]',)),
('STRING', (r"'([^']|(''))*'",)),
('CHAR', (r'#[0-9]+',)),
('CHAR', (r'#\$[0-9A-Fa-f]+',)),
]
#tokenize = make_tokenizer(_example_token_specs)
PK RKM:&3 &3 " kll/extern/funcparserlib/parser.py# -*- coding: utf-8 -*-
# Copyright (c) 2008/2013 Andrey Vlasovskikh
# Modifications by Jacob Alexander 2014, 2016
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""A recurisve descent parser library based on functional combinators.
Basic combinators are taken from Harrison's book ["Introduction to Functional
Programming"][1] and translated from ML into Python. See also [a Russian
translation of the book][2].
[1]: http://www.cl.cam.ac.uk/teaching/Lectures/funprog-jrh-1996/
[2]: http://code.google.com/p/funprog-ru/
A parser `p` is represented by a function of type:
p :: Sequence(a), State -> (b, State)
that takes as its input a sequence of tokens of arbitrary type `a` and a
current parsing state and returns a pair of a parsed token of arbitrary type
`b` and the new parsing state.
The parsing state includes the current position in the sequence being parsed and
the position of the rightmost token that has been consumed while parsing.
Parser functions are wrapped into an object of the class `Parser`. This class
implements custom operators `+` for sequential composition of parsers, `|` for
choice composition, `>>` for transforming the result of parsing. The method
`Parser.parse` provides an easier way for invoking a parser hiding details
related to a parser state:
Parser.parse :: Parser(a, b), Sequence(a) -> b
Although this module is able to deal with sequences of any kind of objects, the
recommended way of using it is applying a parser to a `Sequence(Token)`.
`Token` objects are produced by a regexp-based tokenizer defined in
`funcparserlib.lexer`. By using it this way you get more readable parsing error
messages (as `Token` objects contain their position in the source file) and good
separation of lexical and syntactic levels of the grammar. See examples for more
info.
Debug messages are emitted via a `logging.Logger` object named
`"funcparserlib"`.
"""
__all__ = [
'some', 'a', 'many', 'pure', 'finished', 'maybe', 'skip', 'oneplus',
'forward_decl', 'NoParseError',
]
import logging
log = logging.getLogger('funcparserlib')
debug = False
def Parser_debug(enable, stream=None):
'''
Enables/Disables debug logger for parser.py
NOTE: This is not really multi-thread friendly
@param enable: Enable/disable debug stream
@param stream: StringIO stream to use
'''
global debug
debug = enable
if enable:
logging.raiseExceptions = False
log.setLevel(logging.DEBUG)
ch = logging.StreamHandler(stream)
log.addHandler(ch)
class Parser(object):
"""A wrapper around a parser function that defines some operators for parser
composition.
"""
def __init__(self, p):
"""Wraps a parser function p into an object."""
self.define(p)
def named(self, name):
"""Specifies the name of the parser for more readable parsing log."""
self.name = name
return self
def define(self, p):
"""Defines a parser wrapped into this object."""
f = getattr(p, 'run', p)
if debug:
setattr(self, '_run', f)
else:
setattr(self, 'run', f)
self.named(getattr(p, 'name', p.__doc__))
def run(self, tokens, s):
"""Sequence(a), State -> (b, State)
Runs a parser wrapped into this object.
"""
if debug:
# Truncate at 500 characters
# Any longer isn't that useful and makes the output hard to read
output = 'trying %s' % self.name
if len( output ) > 500:
output = output[:250] + ' ... [truncated] ... ' + output[-250:]
log.debug(output)
return self._run(tokens, s)
def _run(self, tokens, s):
raise NotImplementedError('you must define() a parser')
def parse(self, tokens):
"""Sequence(a) -> b
Applies the parser to a sequence of tokens producing a parsing result.
It provides a way to invoke a parser hiding details related to the
parser state. Also it makes error messages more readable by specifying
the position of the rightmost token that has been reached.
"""
try:
(tree, _) = self.run(tokens, State())
return tree
except NoParseError as e:
max = e.state.max
if len(tokens) > max:
tok = tokens[max]
else:
tok = ''
raise NoParseError('%s: %s' % (e.msg, tok), e.state, tok)
def __add__(self, other):
"""Parser(a, b), Parser(a, c) -> Parser(a, _Tuple(b, c))
A sequential composition of parsers.
NOTE: The real type of the parsed value isn't always such as specified.
Here we use dynamic typing for ignoring the tokens that are of no
interest to the user. Also we merge parsing results into a single _Tuple
unless the user explicitly prevents it. See also skip and >>
combinators.
"""
def magic(v1, v2):
vs = [v for v in [v1, v2] if not isinstance(v, _Ignored)]
if len(vs) == 1:
return vs[0]
elif len(vs) == 2:
if isinstance(vs[0], _Tuple):
return _Tuple(v1 + (v2,))
else:
return _Tuple(vs)
else:
return _Ignored(())
@Parser
def _add(tokens, s):
(v1, s2) = self.run(tokens, s)
(v2, s3) = other.run(tokens, s2)
return magic(v1, v2), s3
# or in terms of bind and pure:
# _add = self.bind(lambda x: other.bind(lambda y: pure(magic(x, y))))
_add.name = '(%s , %s)' % (self.name, other.name)
return _add
def __or__(self, other):
"""Parser(a, b), Parser(a, c) -> Parser(a, b or c)
A choice composition of two parsers.
NOTE: Here we are not providing the exact type of the result. In a
statically typed language something like Either b c could be used. See
also + combinator.
"""
@Parser
def _or(tokens, s):
try:
return self.run(tokens, s)
except NoParseError as e:
return other.run(tokens, State(s.pos, e.state.max))
_or.name = '(%s | %s)' % (self.name, other.name)
return _or
def __rshift__(self, f):
"""Parser(a, b), (b -> c) -> Parser(a, c)
Given a function from b to c, transforms a parser of b into a parser of
c. It is useful for transforming a parser value into another value for
making it a part of a parse tree or an AST.
This combinator may be thought of as a functor from b -> c to Parser(a,
b) -> Parser(a, c).
"""
@Parser
def _shift(tokens, s):
(v, s2) = self.run(tokens, s)
return f(v), s2
# or in terms of bind and pure:
# _shift = self.bind(lambda x: pure(f(x)))
_shift.name = '(%s)' % (self.name,)
return _shift
def bind(self, f):
"""Parser(a, b), (b -> Parser(a, c)) -> Parser(a, c)
NOTE: A monadic bind function. It is used internally to implement other
combinators. Functions bind and pure make the Parser a Monad.
"""
@Parser
def _bind(tokens, s):
(v, s2) = self.run(tokens, s)
return f(v).run(tokens, s2)
_bind.name = '(%s >>=)' % (self.name,)
return _bind
class State(object):
"""A parsing state that is maintained basically for error reporting.
It consists of the current position pos in the sequence being parsed and
the position max of the rightmost token that has been consumed while
parsing.
"""
def __init__(self, pos=0, max=0):
self.pos = pos
self.max = max
def __str__(self):
return str((self.pos, self.max))
def __repr__(self):
return 'State(%r, %r)' % (self.pos, self.max)
class NoParseError(Exception):
def __init__(self, msg='', state=None, token=None):
self.msg = msg
self.state = state
self.token = token # Next token
def __str__(self):
return self.msg
class _Tuple(tuple):
pass
class _Ignored(object):
def __init__(self, value):
self.value = value
def __repr__(self):
return '_Ignored(%s)' % repr(self.value)
@Parser
def finished(tokens, s):
"""Parser(a, None)
Throws an exception if any tokens are left in the input unparsed.
"""
if s.pos >= len(tokens):
return None, s
else:
raise NoParseError('should have reached ', s, tokens[s.pos])
finished.name = 'finished'
def many(p):
"""Parser(a, b) -> Parser(a, [b])
Returns a parser that infinitely applies the parser p to the input sequence
of tokens while it successfully parses them. The resulting parser returns a
list of parsed values.
"""
@Parser
def _many(tokens, s):
"""Iterative implementation preventing the stack overflow."""
res = []
try:
while True:
(v, s) = p.run(tokens, s)
res.append(v)
except NoParseError as e:
return res, State(s.pos, e.state.max)
_many.name = '{ %s }' % p.name
return _many
def some(pred):
"""(a -> bool) -> Parser(a, a)
Returns a parser that parses a token if it satisfies a predicate pred.
"""
@Parser
def _some(tokens, s):
if s.pos >= len(tokens):
raise NoParseError('no tokens left in the stream', s)
else:
t = tokens[s.pos]
if pred(t):
pos = s.pos + 1
s2 = State(pos, max(pos, s.max))
if debug:
log.debug('*matched* "%s", new state = %s' % (t, s2))
return t, s2
else:
if debug:
log.debug('failed "%s", state = %s' % (t, s))
raise NoParseError('got unexpected token', s, t)
_some.name = '(some)'
return _some
def a(value):
"""Eq(a) -> Parser(a, a)
Returns a parser that parses a token that is equal to the value value.
"""
name = getattr(value, 'name', value)
return some(lambda t: t == value).named('(a "%s")' % (name,))
def pure(x):
@Parser
def _pure(_, s):
return x, s
_pure.name = '(pure %r)' % (x,)
return _pure
def maybe(p):
"""Parser(a, b) -> Parser(a, b or None)
Returns a parser that returns None if parsing fails.
NOTE: In a statically typed language, the type Maybe b could be more
appropriate.
"""
return (p | pure(None)).named('[ %s ]' % (p.name,))
def skip(p):
"""Parser(a, b) -> Parser(a, _Ignored(b))
Returns a parser whose results are ignored by the combinator +. It is useful
for throwing away elements of concrete syntax (e.g. ",", ";").
"""
return p >> _Ignored
def oneplus(p):
"""Parser(a, b) -> Parser(a, [b])
Returns a parser that applies the parser p one or more times.
"""
q = p + many(p) >> (lambda x: [x[0]] + x[1])
return q.named('(%s , { %s })' % (p.name, p.name))
def with_forward_decls(suspension):
"""(None -> Parser(a, b)) -> Parser(a, b)
Returns a parser that computes itself lazily as a result of the suspension
provided. It is needed when some parsers contain forward references to
parsers defined later and such references are cyclic. See examples for more
details.
"""
@Parser
def f(tokens, s):
return suspension().run(tokens, s)
return f
def forward_decl():
"""None -> Parser(?, ?)
Returns an undefined parser that can be used as a forward declaration. You
will be able to define() it when all the parsers it depends on are
available.
"""
@Parser
def f(tokens, s):
raise NotImplementedError('you must define() a forward_decl somewhere')
return f
if __name__ == '__main__':
import doctest
doctest.testmod()
PK RKMn| kll/extern/funcparserlib/util.py# -*- coding: utf-8 -*-
# Copyright (c) 2008/2013 Andrey Vlasovskikh
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
def pretty_tree(x, kids, show):
"""(a, (a -> list(a)), (a -> str)) -> str
Returns a pseudographic tree representation of x similar to the tree command
in Unix.
"""
(MID, END, CONT, LAST, ROOT) = ('|-- ', '`-- ', '|   ', '    ', '')
def rec(x, indent, sym):
line = indent + sym + show(x)
xs = kids(x)
if len(xs) == 0:
return line
else:
if sym == MID:
next_indent = indent + CONT
elif sym == ROOT:
next_indent = indent + ROOT
else:
next_indent = indent + LAST
syms = [MID] * (len(xs) - 1) + [END]
lines = [rec(x, next_indent, sym) for x, sym in zip(xs, syms)]
return '\n'.join([line] + lines)
return rec(x, '', ROOT)
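# NOTE: An illustrative sketch (not part of the original file) of what
# pretty_tree produces for a small, hypothetical nested-tuple tree:
#
#   tree = ('expr', [('term', []), ('factor', [('int', [])])])
#   print(pretty_tree(tree, kids=lambda n: n[1], show=lambda n: n[0]))
#
#   expr
#   |-- term
#   `-- factor
#       `-- int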
PK RKMc" $ kll/layouts/KType-NoAnimations-0.kllVariant = "standard";
Generator = "NONE";
Base = "Base";
Date = "2017-10-13";
KLL = "0.5c";
Author = "jbondeson (Jeremy Bondeson) 2017";
Version = "0.2";
Name = "KType";
Layout = "NoAnimations";
ResultMacroBufferSize = "100";
Pixel_AnimationStackSize = "50";
DelayedCapabilitiesStackSize = "25";
U"ESC" : U"ESC";
U"F1" : U"F1";
U"F2" : U"F2";
U"F3" : U"F3";
U"F4" : U"F4";
U"F5" : U"F5";
U"F6" : U"F6";
U"F7" : U"F7";
U"F8" : U"F8";
U"F9" : U"F9";
U"F10" : U"F10";
U"F11" : U"F11";
U"F12" : U"F12";
U"PRINTSCREEN" : U"PRINTSCREEN";
U"SCROLLLOCK" : U"SCROLLLOCK";
U"PAUSE" : U"PAUSE";
U"BACKTICK" : U"`";
U"1" : U"1";
U"2" : U"2";
U"3" : U"3";
U"4" : U"4";
U"5" : U"5";
U"6" : U"6";
U"7" : U"7";
U"8" : U"8";
U"9" : U"9";
U"0" : U"0";
U"MINUS" : U"-";
U"EQUALS" : U"=";
U"BACKSPACE" : U"\";
U"INSERT" : U"INSERT";
U"HOME" : U"HOME";
U"PAGEUP" : U"PAGEUP";
U"TAB" : U"TAB";
U"Q" : U"Q";
U"W" : U"W";
U"E" : U"E";
U"R" : U"R";
U"T" : U"T";
U"Y" : U"Y";
U"U" : U"U";
U"I" : U"I";
U"O" : U"O";
U"P" : U"P";
U"LBRACE" : U"[";
U"RBRACE" : U"]";
U"BACKSLASH" : U"BACKSPACE";
U"DELETE" : U"DELETE";
U"END" : U"END";
U"PAGEDOWN" : U"PAGEDOWN";
U"CAPSLOCK" : U"LCTRL";
U"A" : U"A";
U"S" : U"S";
U"D" : U"D";
U"F" : U"F";
U"G" : U"G";
U"H" : U"H";
U"J" : U"J";
U"K" : U"K";
U"L" : U"L";
U"SEMICOLON" : U";";
U"QUOTE" : U"'";
U"ENTER" : U"ENTER";
U"LSHIFT" : U"LSHIFT";
U"Z" : U"Z";
U"X" : U"X";
U"C" : U"C";
U"V" : U"V";
U"B" : U"B";
U"N" : U"N";
U"M" : U"M";
U"COMMA" : U",";
U"PERIOD" : U".";
U"SLASH" : U"/";
U"RSHIFT" : U"RSHIFT";
U"UP" : U"UP";
U"LCTRL" : U"LCTRL";
U"LGUI" : U"LGUI";
U"LALT" : U"LALT";
U"SPACE" : U"SPACE";
U"RALT" : U"RALT";
U"RGUI" : U"FUNCTION1";
U"APP" : U"APP";
U"RCTRL" : U"RCTRL";
U"LEFT" : U"LEFT";
U"DOWN" : U"DOWN";
U"RIGHT" : U"RIGHT";
S[0x00-0x5F] :+ A[countdown2m](start);
### Added by canned animation fingerprints_two_tone ###
S[0x00-0x5F] :+ A[fingerprints_two_tone](start);
A[countdown2m] <= replace:all, pfunc:interp,framestretch,framedelay:254, loops:1;
### AUTO GENERATED - DO NOT EDIT ###
A[countdown2m, 1] <= P[c:0%](255,255,255), P[c:100%](255,255,255);
A[countdown2m, 2] <= P[c:0%](255,255,255), P[c:100%](255,255,255);
A[countdown2m, 3] <= P[c:0%](255,255,255), P[c:100%](255,255,255);
A[countdown2m, 4] <= P[c:0%](255,255,255), P[c:100%](255,255,255);
A[countdown2m, 5] <= P[c:0%](255,255,255), P[c:100%](255,255,255);
A[countdown2m, 6] <= P[c:0%](255,255,255), P[c:100%](255,255,255);
A[countdown2m, 7] <= P[c:0%](255,255,255), P[c:100%](255,255,255);
A[countdown2m, 8] <= P[c:0%](255,255,255), P[c:100%](255,255,255);
A[countdown2m, 9] <= P[c:0%](255,255,255), P[c:100%](255,255,255);
A[countdown2m, 10] <= P[c:0%](255,255,255), P[c:100%](255,255,255);
A[countdown2m, 11] <= P[c:0%](255,255,255), P[c:100%](255,255,255);
A[countdown2m, 12] <= P[c:0%](255,255,255), P[c:100%](255,255,255);
A[countdown2m, 13] <= P[c:0%](255,255,255), P[c:100%](255,255,255);
A[countdown2m, 14] <= P[c:0%](255,255,255), P[c:100%](255,255,255);
A[countdown2m, 15] <= P[c:0%](255,255,255), P[c:100%](255,255,255);
A[countdown2m, 16] <= P[c:0%](255,255,255), P[c:100%](255,255,255);
A[countdown2m, 17] <= P[c:0%](255,255,255), P[c:100%](255,255,255);
A[countdown2m, 18] <= P[c:0%](255,255,255), P[c:100%](255,255,255);
A[countdown2m, 19] <= P[c:0%](255,255,255), P[c:100%](255,255,255);
A[countdown2m, 20] <= P[c:0%](255,255,255), P[c:100%](255,255,255);
A[countdown2m, 21] <= P[c:0%](255,255,255), P[c:100%](255,255,255);
A[countdown2m, 22] <= P[c:0%](255,255,255), P[c:100%](255,255,255);
A[countdown2m, 23] <= P[c:0%](255,255,255), P[c:100%](255,255,255);
A[countdown2m, 24] <= P[c:0%](255,255,255), P[c:100%](255,255,255);
A[countdown2m, 25] <= P[c:0%](255,255,255), P[c:100%](255,255,255);
A[countdown2m, 26] <= P[c:0%](255,255,255), P[c:100%](255,255,255);
A[countdown2m, 27] <= P[c:0%](255,255,255), P[c:100%](255,255,255);
A[countdown2m, 28] <= P[c:0%](255,255,255), P[c:100%](255,255,255);
A[countdown2m, 29] <= P[c:0%](255,255,255), P[c:100%](255,255,255);
A[countdown2m, 30] <= P[c:0%](255,255,255), P[c:100%](255,255,255);
A[countdown2m, 31] <= P[c:0%](255,255,255), P[c:100%](255,255,255);
A[countdown2m, 32] <= P[c:0%](255,255,255), P[c:100%](255,255,255);
A[countdown2m, 33] <= P[c:0%](255,255,255), P[c:100%](255,255,255);
A[countdown2m, 34] <= P[c:0%](255,255,255), P[c:100%](255,255,255);
A[countdown2m, 35] <= P[c:0%](255,255,255), P[c:100%](255,255,255);
A[countdown2m, 36] <= P[c:0%](255,255,255), P[c:100%](255,255,255);
A[countdown2m, 37] <= P[c:0%](255,255,255), P[c:100%](255,255,255);
A[countdown2m, 38] <= P[c:0%](255,255,255), P[c:100%](255,255,255);
A[countdown2m, 39] <= P[c:0%](255,255,255), P[c:100%](255,255,255);
A[countdown2m, 40] <= P[c:0%](255,255,255), P[c:100%](255,255,255);
A[countdown2m, 41] <= P[c:0%](255,255,255), P[c:100%](255,255,255);
A[countdown2m, 42] <= P[c:0%](255,255,255), P[c:100%](255,255,255);
A[countdown2m, 43] <= P[c:0%](255,255,255), P[c:100%](255,255,255);
A[countdown2m, 44] <= P[c:0%](255,255,255), P[c:100%](255,255,255);
A[countdown2m, 45] <= P[c:0%](255,255,255), P[c:100%](255,255,255);
A[countdown2m, 46] <= P[c:0%](255,255,255), P[c:100%](255,255,255);
A[countdown2m, 47] <= P[c:0%](255,255,255), P[c:100%](255,255,255);
A[countdown2m, 48] <= P[c:0%](0,0,0), P[c:100%](0,0,0);
A[fingerprints_two_tone] <= framedelay:8, framestretch, loops:1, replace:basic;
### AUTO GENERATED - DO NOT EDIT ###
A[fingerprints_two_tone, 1] <= P[r:i,c:i](255,0,0);
A[fingerprints_two_tone, 2] <= P[r:i,c:i](255,0,0);
A[fingerprints_two_tone, 3] <= P[r:i,c:i](255,0,0);
A[fingerprints_two_tone, 4] <= P[r:i,c:i](255,0,0);
A[fingerprints_two_tone, 5] <= P[r:i,c:i](255,115,82);
A[fingerprints_two_tone, 6] <= P[r:i,c:i](255,178,153);
A[fingerprints_two_tone, 7] <= P[r:i,c:i](255,207,191);
A[fingerprints_two_tone, 8] <= P[r:i,c:i](255,217,203);
A[fingerprints_two_tone, 9] <= P[r:i,c:i](255,226,216);
A[fingerprints_two_tone, 10] <= P[r:i,c:i](255,236,229);
A[fingerprints_two_tone, 11] <= P[r:i,c:i](255,245,242);
A[fingerprints_two_tone, 12] <= P[r:i,c:i](255,255,255);
PK RKM-_+ + kll/layouts/Rapoo_E9070_Fn.kllName = "Rapoo E9070 Fn";
Version = 0.1;
Author = "nazar-pc (Nazar Mokrynskyi) 2017";
KLL = 0.3d;
# Modified Date
Date = 2017-05-11;
U"F1" : CONS"Back";
U"F2" : CONS"Forward";
U"F3" : CONS"Home";
U"F4" : CONS"EmailReader";
U"F5" : CONS"ConsumerControlConfig";
U"F6" : CONS"PausePlay";
U"F7" : CONS"Stop";
U"F8" : CONS"ScanPreviousTrack";
U"F9" : CONS"ScanNextTrack";
U"F10" : U"VolumeDown";
U"F11" : U"VolumeUp";
U"F12" : U"Mute";
U"Insert" : U"ScrollLock";
U"Left" : U"Home";
U"Up" : U"PageUp";
U"Down" : U"PageDown";
U"Right" : U"End";
PK RKMd!0 kll/layouts/animation_test.kll# Animation Example Configuration
Name = Animation Example;
Version = 0.2;
Author = "HaaTa (Jacob Alexander) 2017-2018";
KLL = 0.5;
# Modified Date
Date = 2018-02-28;
### Animations ###
# Pixel index 1 test
A[testanimation] <= loops:1;
A[testanimation, 1] <= P[1](30,70,120);
A[testanimation, 2] <= P[1](0,0,0);
A[testanimation, 3] <= P[1](60,90,140);
# Pixel clear test (scancodes)
A[clear_pixels] <= loops:1;
A[clear_pixels, 1] <=
S[18](0,0,0),
S[19](0,0,0),
S[20](0,0,0),
S[29](0,0,0),
S[38](0,0,0);
# Rainbow, static interpolation test
A[rainbow_static_fill_interp] <= pfunc:interp;
A[rainbow_static_fill_interp, 1] <=
P[c:0%](255,0,0),
P[c:25%](255,255,0),
P[c:50%](0,255,0),
P[c:75%](0,0,255),
P[c:100%](127,0,255);
# Full fill example
A[blue_fill_interp] <= pfunc:interp;
A[blue_fill_interp, 1] <=
P[c:0%](0x00,0xAE,0xDA),
P[c:100%](0x00,0xAE,0xDA);
# Fade-in example
# TODO - Use frame interpolation when ready
A[fade_in] <= pfunc:interp, loops:1, replace:basic;
A[fade_in, 1] <= P[c:0%](0,0,0), P[c:100%](0,0,0);
A[fade_in, 2] <= P[c:0%](5,5,5), P[c:100%](5,5,5);
A[fade_in, 3] <= P[c:0%](10,10,10), P[c:100%](10,10,10);
A[fade_in, 4] <= P[c:0%](20,20,20), P[c:100%](20,20,20);
A[fade_in, 5] <= P[c:0%](40,40,40), P[c:100%](40,40,40);
A[fade_in, 6] <= P[c:0%](60,60,60), P[c:100%](60,60,60);
A[fade_in, 7] <= P[c:0%](80,80,80), P[c:100%](80,80,80);
A[fade_in, 8] <= P[c:0%](100,100,100), P[c:100%](100,100,100);
A[fade_in, 9] <= P[c:0%](130,130,130), P[c:100%](130,130,130);
A[fade_in, 10] <= P[c:0%](160,160,160), P[c:100%](160,160,160);
A[fade_in, 11] <= P[c:0%](190,190,190), P[c:100%](190,190,190);
A[fade_in, 12] <= P[c:0%](220,220,220), P[c:100%](220,220,220);
A[fade_in, 13] <= P[c:0%](255,255,255), P[c:100%](255,255,255);
A[fade_in, 14] <= P[c:0%](200,200,200), P[c:100%](200,200,200);
A[fade_in, 15] <= P[c:0%](200,200,200), P[c:100%](200,200,200);
A[fade_in, 16] <= P[c:0%](200,200,200), P[c:100%](200,200,200);
A[fade_in, 17] <= P[c:0%](150,150,150), P[c:100%](150,150,150);
A[fade_in, 18] <= P[c:0%](150,150,150), P[c:100%](150,150,150);
A[fade_in, 19] <= P[c:0%](100,100,100), P[c:100%](100,100,100);
A[fade_in, 20] <= P[c:0%](100,100,100), P[c:100%](100,100,100);
A[fade_in, 21] <= P[c:0%](100,100,100), P[c:100%](100,100,100);
A[fade_in, 22] <= P[c:0%](75,75,75), P[c:75%](75,75,75);
A[fade_in, 23] <= P[c:0%](75,75,75), P[c:75%](75,75,75);
A[fade_in, 24] <= P[c:0%](75,75,75), P[c:75%](75,75,75);
A[fade_in, 25] <= P[c:0%](50,50,50), P[c:50%](50,50,50);
A[fade_in, 26] <= P[c:0%](50,50,50), P[c:50%](50,50,50);
A[fade_in, 27] <= P[c:0%](50,50,50), P[c:50%](50,50,50);
#A[fade_in, 22] <= P[c:0%](0,100,0), P[c:25%](100,100,0), P[c:50%](100,0,0), P[c:75%](75,0,100), P[c:100%](0,0,100);
#A[fade_in, 23] <= P[c:0%](0,200,0), P[c:25%](200,200,0), P[c:50%](200,0,0), P[c:75%](120,0,200), P[c:100%](0,0,200);
#A[fade_in, 24] <= P[c:0%](255,0,0), P[c:25%](255,255,0), P[c:50%](0,255,0), P[c:75%](0,0,255), P[c:100%](127,0,255);
# Rainbow Animation example
A[rainbow_fill_interp] <= loop, framestretch, framedelay:1, replace:basic, pfunc:interp;
A[rainbow_fill_interp, 1] <= P[c:0%](255,0,0), P[c:25%](255,255,0), P[c:50%](0,255,0), P[c:75%](0,0,255), P[c:100%](127,0,255);
#A[rainbow_fill_interp, 1] <= P[c:0%](255,0,0), P[c:25%](255,255,0), P[c:50%](0,255,0), P[c:75%](0,0,255);
A[rainbow_fill_interp, 2] <= P[c:0%](255,0,0), P[c:27%](255,255,0), P[c:52%](0,255,0), P[c:77%](0,0,255), P[c:100%](127,0,255);
#A[rainbow_fill_interp, 2] <= P[c:0%](255,0,0), P[c:27%](255,255,0), P[c:52%](0,255,0), P[c:77%](0,0,255);
A[rainbow_fill_interp, 3] <= P[c:0%](255,0,0), P[c:29%](255,255,0), P[c:54%](0,255,0), P[c:79%](0,0,255), P[c:100%](127,0,255);
#A[rainbow_fill_interp, 3] <= P[c:0%](255,0,0), P[c:29%](255,255,0), P[c:54%](0,255,0), P[c:79%](0,0,255);
A[rainbow_fill_interp, 4] <= P[c:0%](255,0,0), P[c:31%](255,255,0), P[c:56%](0,255,0), P[c:81%](0,0,255), P[c:100%](127,0,255);
#A[rainbow_fill_interp, 4] <= P[c:0%](255,0,0), P[c:31%](255,255,0), P[c:56%](0,255,0), P[c:81%](0,0,255);
#A[rainbow_fill_interp, 4] <= P[c:56%](0,255,0), P[c:81%](0,0,255), P[c:100%](127,0,255);
A[rainbow_fill_interp, 5] <= P[c:0%](255,0,0), P[c:33%](255,255,0), P[c:58%](0,255,0), P[c:83%](0,0,255), P[c:100%](127,0,255);
A[rainbow_fill_interp, 6] <= P[c:0%](255,0,0), P[c:35%](255,255,0), P[c:60%](0,255,0), P[c:85%](0,0,255), P[c:100%](127,0,255);
A[rainbow_fill_interp, 7] <= P[c:0%](255,0,0), P[c:37%](255,255,0), P[c:62%](0,255,0), P[c:87%](0,0,255), P[c:100%](127,0,255);
A[rainbow_fill_interp, 8] <= P[c:0%](255,0,0), P[c:39%](255,255,0), P[c:64%](0,255,0), P[c:89%](0,0,255), P[c:100%](127,0,255);
A[rainbow_fill_interp, 9] <= P[c:0%](255,0,0), P[c:41%](255,255,0), P[c:66%](0,255,0), P[c:91%](0,0,255), P[c:100%](127,0,255);
A[rainbow_fill_interp, 10] <= P[c:0%](255,0,0), P[c:43%](255,255,0), P[c:68%](0,255,0), P[c:93%](0,0,255), P[c:100%](127,0,255);
A[rainbow_fill_interp, 11] <= P[c:0%](255,0,0), P[c:45%](255,255,0), P[c:70%](0,255,0), P[c:95%](0,0,255), P[c:100%](127,0,255);
A[rainbow_fill_interp, 12] <= P[c:0%](255,0,0), P[c:47%](255,255,0), P[c:72%](0,255,0), P[c:97%](0,0,255), P[c:100%](127,0,255);
A[rainbow_fill_interp, 13] <= P[c:0%](255,0,0), P[c:45%](255,255,0), P[c:70%](0,255,0), P[c:95%](0,0,255), P[c:100%](127,0,255);
A[rainbow_fill_interp, 14] <= P[c:0%](255,0,0), P[c:43%](255,255,0), P[c:68%](0,255,0), P[c:93%](0,0,255), P[c:100%](127,0,255);
A[rainbow_fill_interp, 15] <= P[c:0%](255,0,0), P[c:41%](255,255,0), P[c:66%](0,255,0), P[c:91%](0,0,255), P[c:100%](127,0,255);
A[rainbow_fill_interp, 16] <= P[c:0%](255,0,0), P[c:39%](255,255,0), P[c:64%](0,255,0), P[c:89%](0,0,255), P[c:100%](127,0,255);
A[rainbow_fill_interp, 17] <= P[c:0%](255,0,0), P[c:37%](255,255,0), P[c:62%](0,255,0), P[c:87%](0,0,255), P[c:100%](127,0,255);
A[rainbow_fill_interp, 18] <= P[c:0%](255,0,0), P[c:35%](255,255,0), P[c:60%](0,255,0), P[c:85%](0,0,255), P[c:100%](127,0,255);
A[rainbow_fill_interp, 19] <= P[c:0%](255,0,0), P[c:33%](255,255,0), P[c:58%](0,255,0), P[c:83%](0,0,255), P[c:100%](127,0,255);
A[rainbow_fill_interp, 20] <= P[c:0%](255,0,0), P[c:31%](255,255,0), P[c:56%](0,255,0), P[c:81%](0,0,255), P[c:100%](127,0,255);
A[rainbow_fill_interp, 21] <= P[c:0%](255,0,0), P[c:29%](255,255,0), P[c:54%](0,255,0), P[c:79%](0,0,255), P[c:100%](127,0,255);
A[rainbow_fill_interp, 22] <= P[c:0%](255,0,0), P[c:27%](255,255,0), P[c:52%](0,255,0), P[c:77%](0,0,255), P[c:100%](127,0,255);
A[rainbow_fill_interp, 23] <= P[c:0%](255,0,0), P[c:25%](255,255,0), P[c:50%](0,255,0), P[c:75%](0,0,255), P[c:100%](127,0,255);
A[rainbow_fill_interp, 24] <= P[c:0%](255,0,0), P[c:23%](255,255,0), P[c:47%](0,255,0), P[c:73%](0,0,255), P[c:100%](127,0,255);
A[rainbow_fill_interp, 25] <= P[c:0%](255,0,0), P[c:21%](255,255,0), P[c:44%](0,255,0), P[c:71%](0,0,255), P[c:100%](127,0,255);
A[rainbow_fill_interp, 26] <= P[c:0%](255,0,0), P[c:19%](255,255,0), P[c:41%](0,255,0), P[c:69%](0,0,255), P[c:100%](127,0,255);
A[rainbow_fill_interp, 27] <= P[c:0%](255,0,0), P[c:17%](255,255,0), P[c:38%](0,255,0), P[c:67%](0,0,255), P[c:100%](127,0,255);
A[rainbow_fill_interp, 28] <= P[c:0%](255,0,0), P[c:15%](255,255,0), P[c:35%](0,255,0), P[c:65%](0,0,255), P[c:100%](127,0,255);
A[rainbow_fill_interp, 29] <= P[c:0%](255,0,0), P[c:13%](255,255,0), P[c:33%](0,255,0), P[c:63%](0,0,255), P[c:100%](127,0,255);
A[rainbow_fill_interp, 30] <= P[c:0%](255,0,0), P[c:11%](255,255,0), P[c:30%](0,255,0), P[c:61%](0,0,255), P[c:100%](127,0,255);
A[rainbow_fill_interp, 31] <= P[c:0%](255,0,0), P[c:9%](255,255,0), P[c:27%](0,255,0), P[c:59%](0,0,255), P[c:100%](127,0,255);
A[rainbow_fill_interp, 32] <= P[c:0%](255,0,0), P[c:7%](255,255,0), P[c:24%](0,255,0), P[c:57%](0,0,255), P[c:100%](127,0,255);
A[rainbow_fill_interp, 33] <= P[c:0%](255,0,0), P[c:5%](255,255,0), P[c:21%](0,255,0), P[c:55%](0,0,255), P[c:100%](127,0,255);
A[rainbow_fill_interp, 34] <= P[c:0%](255,0,0), P[c:3%](255,255,0), P[c:18%](0,255,0), P[c:53%](0,0,255), P[c:100%](127,0,255);
A[rainbow_fill_interp, 35] <= P[c:0%](255,0,0), P[c:3%](255,255,0), P[c:18%](0,255,0), P[c:53%](0,0,255), P[c:100%](127,0,255);
A[rainbow_fill_interp, 36] <= P[c:0%](255,0,0), P[c:3%](255,255,0), P[c:18%](0,255,0), P[c:53%](0,0,255), P[c:100%](127,0,255);
A[rainbow_fill_interp, 37] <= P[c:0%](255,0,0), P[c:10%](255,255,0), P[c:20%](0,255,0), P[c:60%](0,0,255), P[c:100%](127,0,255);
A[rainbow_fill_interp, 38] <= P[c:0%](255,0,0), P[c:20%](255,255,0), P[c:40%](0,255,0), P[c:70%](0,0,255), P[c:100%](127,0,255);
A[rainbow_fill_interp, 39] <= P[c:0%](255,0,0), P[c:25%](255,255,0), P[c:50%](0,255,0), P[c:75%](0,0,255), P[c:100%](127,0,255);
#A[rainbow_fill_interp, 40] <= P[c:0%](100,100,100), P[c:100%](100,100,100);
# Rainbow Top Row
A[rainbow_static_fill_interp_top_row] <= pfunc:interp;
A[rainbow_static_fill_interp_top_row, 1] <= P[c:0%,r:1](255,0,0), P[c:25%,r:1](255,255,0), P[c:50%,r:1](0,255,0), P[c:75%,r:1](0,0,255), P[c:100%,r:1](127,0,255);
# Fade-out example
# TODO - Use frame interpolation when ready
A[fade_out] <= pfunc:interp;
A[fade_out, 16] <= P[c:0%](0,0,0), P[c:100%](0,0,0);
A[fade_out, 15] <= P[c:0%](5,5,5), P[c:100%](5,5,5);
A[fade_out, 14] <= P[c:0%](10,10,10), P[c:100%](10,10,10);
A[fade_out, 13] <= P[c:0%](20,20,20), P[c:100%](20,20,20);
A[fade_out, 12] <= P[c:0%](40,40,40), P[c:100%](40,40,40);
A[fade_out, 11] <= P[c:0%](60,60,60), P[c:100%](60,60,60);
A[fade_out, 10] <= P[c:0%](80,80,80), P[c:100%](80,80,80);
A[fade_out, 9] <= P[c:0%](100,100,100), P[c:100%](100,100,100);
A[fade_out, 8] <= P[c:0%](130,130,130), P[c:100%](130,130,130);
A[fade_out, 7] <= P[c:0%](160,160,160), P[c:100%](160,160,160);
A[fade_out, 6] <= P[c:0%](190,190,190), P[c:100%](190,190,190);
A[fade_out, 5] <= P[c:0%](220,220,220), P[c:100%](220,220,220);
A[fade_out, 4] <= P[c:0%](255,255,255), P[c:100%](255,255,255);
A[fade_out, 3] <= P[c:0%](200,200,200), P[c:100%](200,200,200);
A[fade_out, 2] <= P[c:0%](150,150,150), P[c:100%](150,150,150);
A[fade_out, 1] <= P[c:0%](100,100,100), P[c:100%](100,100,100);
U"Right" :+ A[relative_add](start);
U"ScrollLock" :+ A[relative_add](start);
U"Insert" :+ A[fade_in](start);
# Relative animation test
A[relative_add] <= replace:basic, loops:2, framedelay:1;
A[relative_add, 1] <= P[r:i](+:10,+:10,+:10), P[c:i](+:10,+:10,+:10);
A[relative_add, 2] <= P[r:i](+:10,+:10,+:10), P[c:i](+:10,+:10,+:10);
A[relative_add, 3] <= P[r:i](+:10,+:10,+:10), P[c:i](+:10,+:10,+:10);
A[relative_add, 4] <= P[r:i](+:10,+:10,+:10), P[c:i](+:10,+:10,+:10);
A[relative_add, 5] <= P[r:i](+:10,+:10,+:10), P[c:i](+:10,+:10,+:10);
A[relative_add, 6] <= P[r:i](+:10,+:10,+:10), P[c:i](+:10,+:10,+:10);
A[relative_add, 7] <= P[r:i](+:10,+:10,+:10), P[c:i](+:10,+:10,+:10);
A[relative_add, 8] <= P[r:i](+:10,+:10,+:10), P[c:i](+:10,+:10,+:10);
A[relative_add, 9] <= P[r:i](+:10,+:10,+:10), P[c:i](+:10,+:10,+:10);
A[relative_add, 10] <= P[r:i](+:10,+:10,+:10), P[c:i](+:10,+:10,+:10);
A[relative_add, 11] <= P[r:i](-:10,-:10,-:10), P[c:i](-:10,-:10,-:10);
A[relative_add, 12] <= P[r:i](-:10,-:10,-:10), P[c:i](-:10,-:10,-:10);
A[relative_add, 13] <= P[r:i](-:10,-:10,-:10), P[c:i](-:10,-:10,-:10);
A[relative_add, 14] <= P[r:i](-:10,-:10,-:10), P[c:i](-:10,-:10,-:10);
A[relative_add, 15] <= P[r:i](-:10,-:10,-:10), P[c:i](-:10,-:10,-:10);
A[relative_add, 16] <= P[r:i](-:10,-:10,-:10), P[c:i](-:10,-:10,-:10);
A[relative_add, 17] <= P[r:i](-:10,-:10,-:10), P[c:i](-:10,-:10,-:10);
A[relative_add, 18] <= P[r:i](-:10,-:10,-:10), P[c:i](-:10,-:10,-:10);
A[relative_add, 19] <= P[r:i](-:10,-:10,-:10), P[c:i](-:10,-:10,-:10);
A[relative_add, 20] <= P[r:i](-:10,-:10,-:10), P[c:i](-:10,-:10,-:10);
#A[relative_add, 21] <= P[r:i,c:i](50,50,50);
#A[relative_add, 2] <= P[c:i](+100,+100,+100);
#A[relative_add, 3] <= P[r:i,c:i](+100,+100,+100);
# TODO Move to U"CapsLock" when working
A[caps_lock] <= loop, replace:all;
A[caps_lock, 1] <= S[0x36](+:50,+:50,+:50);
A[caps_lock, 2] <= S[0x36](+:50,+:50,+:50);
A[caps_lock, 3] <= S[0x36](+:50,+:50,+:50);
A[caps_lock, 4] <= S[0x36](+:50,+:50,+:50);
A[caps_lock, 5] <= S[0x36](-:20,-:20,-:20);
A[caps_lock, 6] <= S[0x36](-:20,-:20,-:20);
A[caps_lock, 7] <= S[0x36](-:20,-:20,-:20);
A[caps_lock, 8] <= S[0x36](-:20,-:20,-:20);
A[caps_lock, 9] <= S[0x36](-:20,-:20,-:20);
A[caps_lock, 10] <= S[0x36](-:20,-:20,-:20);
A[caps_lock, 11] <= S[0x36](-:20,-:20,-:20);
A[caps_lock, 12] <= S[0x36](-:20,-:20,-:20);
A[fade, 1] <= P[c:0%](+:1,+:1,+:1), P[c:100%](+:1,+:1,+:1);
A[spot_up] <= loops:30;
A[spot_up, 1] <=
P[r:i, c:i ](+:2,+:2,+:2),
P[r:i+1,c:i ](-:1,-:1,-:1),
P[r:i-1,c:i ](-:1,-:1,-:1),
P[r:i, c:i+1](-:1,-:1,-:1),
P[r:i, c:i-1](-:1,-:1,-:1),
P[r:i+1,c:i+1](-:1,-:1,-:1),
P[r:i-1,c:i+1](-:1,-:1,-:1),
P[r:i+1,c:i-1](-:1,-:1,-:1),
P[r:i-1,c:i-1](-:1,-:1,-:1),
P[r:i+2,c:i ](-:1,-:1,-:1),
P[r:i-2,c:i ](-:1,-:1,-:1),
P[r:i, c:i+2](-:1,-:1,-:1),
P[r:i, c:i-2](-:1,-:1,-:1);
#U[0x03-0x52,0xE0-0xE7] :+ A[spot_up];
# TODO Move to U"ScrollLock" when working
A[scroll_lock] <= loop, replace:all;
A[scroll_lock, 1] <= S[0xF](+:50,+:50,+:50);
A[scroll_lock, 2] <= S[0xF](+:50,+:50,+:50);
A[scroll_lock, 3] <= S[0xF](+:50,+:50,+:50);
A[scroll_lock, 4] <= S[0xF](+:50,+:50,+:50);
A[scroll_lock, 5] <= S[0xF](-:20,-:20,-:20);
A[scroll_lock, 6] <= S[0xF](-:20,-:20,-:20);
A[scroll_lock, 7] <= S[0xF](-:20,-:20,-:20);
A[scroll_lock, 8] <= S[0xF](-:20,-:20,-:20);
A[scroll_lock, 9] <= S[0xF](-:20,-:20,-:20);
A[scroll_lock, 10] <= S[0xF](-:20,-:20,-:20);
A[scroll_lock, 11] <= S[0xF](-:20,-:20,-:20);
A[scroll_lock, 12] <= S[0xF](-:20,-:20,-:20);
# Lock Keys
A[lock_event] <= loops:1, framedelay:1, replace:basic;
A[lock_event, 1] <= P[r:i,c:i](+:100,+:100,+:100);
A[lock_event, 2] <= P[r:i,c:i](+:100,+:100,+:100);
A[lock_event, 3] <= P[r:i,c:i](+:100,+:100,+:100);
A[lock_event, 4] <= P[r:i,c:i](+:0,+:0,+:0);
A[lock_event, 5] <= P[r:i,c:i](+:0,+:0,+:0);
A[lock_event, 6] <= P[r:i,c:i](+:0,+:0,+:0);
A[lock_event, 7] <= P[r:i,c:i](+:0,+:0,+:0);
A[lock_event, 8] <= P[r:i,c:i](+:0,+:0,+:0);
A[lock_event, 9] <= P[r:i,c:i](+:0,+:0,+:0);
A[lock_event, 10] <= P[r:i,c:i](+:0,+:0,+:0);
A[lock_event, 11] <= P[r:i,c:i](+:0,+:0,+:0);
A[lock_event, 12] <= P[r:i,c:i](+:0,+:0,+:0);
A[lock_event, 13] <= P[r:i,c:i](-:10,-:10,-:10);
A[lock_event, 14] <= P[r:i,c:i](-:10,-:10,-:10);
A[lock_event, 15] <= P[r:i,c:i](-:10,-:10,-:10);
A[lock_event, 16] <= P[r:i,c:i](-:10,-:10,-:10);
A[lock_event, 17] <= P[r:i,c:i](-:10,-:10,-:10);
A[lock_event, 18] <= P[r:i,c:i](-:10,-:10,-:10);
A[lock_event, 19] <= P[r:i,c:i](-:10,-:10,-:10);
A[lock_event, 20] <= P[r:i,c:i](-:10,-:10,-:10);
A[lock_event, 21] <= P[r:i,c:i](-:10,-:10,-:10);
A[lock_event, 22] <= P[r:i,c:i](-:10,-:10,-:10);
A[lock_event, 23] <= P[r:i,c:i](-:10,-:10,-:10);
A[lock_event, 24] <= P[r:i,c:i](-:10,-:10,-:10);
A[lock_event, 25] <= P[r:i,c:i](-:10,-:10,-:10);
A[lock_event, 26] <= P[r:i,c:i](-:10,-:10,-:10);
A[lock_event, 27] <= P[r:i,c:i](-:10,-:10,-:10);
A[lock_event, 28] <= P[r:i,c:i](-:10,-:10,-:10);
A[lock_event, 29] <= P[r:i,c:i](-:10,-:10,-:10);
U"RCtrl" :+ layerLock(1);
U"App" :+ layerLock(2);
# Underlighting streak
A[under_streak] <= loops:1, replace:stack;
A[under_streak, 1] <=
P[102](+:91,+:50,+:86),
P[103](+:255,+:255,+:255),
P[104](-:55,-:100,-:100),
P[105](-:100,-:100,-:100),
P[106](-:100,-:100,-:100);
A[under_streak, 2] <=
P[101](+:91,+:50,+:86),
P[102](+:255,+:255,+:255),
P[103](-:55,-:100,-:100),
P[104](-:100,-:100,-:100),
P[105](-:100,-:100,-:100);
A[under_streak, 3] <=
P[100](+:91,+:50,+:86),
P[101](+:255,+:255,+:255),
P[102](-:55,-:100,-:100),
P[103](-:100,-:100,-:100),
P[104](-:100,-:100,-:100);
A[under_streak, 4] <=
P[99](+:91,+:50,+:86),
P[100](+:255,+:255,+:255),
P[101](-:55,-:100,-:100),
P[102](-:100,-:100,-:100),
P[103](-:100,-:100,-:100);
A[under_streak, 5] <=
P[98](+:91,+:50,+:86),
P[99](+:255,+:255,+:255),
P[100](-:55,-:100,-:100),
P[101](-:100,-:100,-:100),
P[102](-:100,-:100,-:100);
A[under_streak, 6] <=
P[97](+:91,+:50,+:86),
P[98](+:255,+:255,+:255),
P[99](-:55,-:100,-:100),
P[100](-:100,-:100,-:100),
P[101](-:100,-:100,-:100);
A[under_streak, 7] <=
P[128](+:91,+:50,+:86),
P[97](+:255,+:255,+:255),
P[98](-:55,-:100,-:100),
P[99](-:100,-:100,-:100),
P[100](-:100,-:100,-:100);
A[under_streak, 8] <=
P[127](+:91,+:50,+:86),
P[128](+:255,+:255,+:255),
P[97](-:55,-:100,-:100),
P[98](-:100,-:100,-:100),
P[99](-:100,-:100,-:100);
A[under_streak, 9] <=
P[126](+:91,+:50,+:86),
P[127](+:255,+:255,+:255),
P[128](-:55,-:100,-:100),
P[97](-:100,-:100,-:100),
P[98](-:100,-:100,-:100);
A[under_streak, 10] <=
P[125](+:91,+:50,+:86),
P[126](+:255,+:255,+:255),
P[127](-:55,-:100,-:100),
P[128](-:100,-:100,-:100),
P[97](-:100,-:100,-:100);
A[under_streak, 11] <=
P[124](+:91,+:50,+:86),
P[125](+:255,+:255,+:255),
P[126](-:55,-:100,-:100),
P[127](-:100,-:100,-:100),
P[128](-:100,-:100,-:100);
A[under_streak, 12] <=
P[123](+:91,+:50,+:86),
P[124](+:255,+:255,+:255),
P[125](-:55,-:100,-:100),
P[126](-:100,-:100,-:100),
P[127](-:100,-:100,-:100);
A[under_streak, 13] <=
P[122](+:91,+:50,+:86),
P[123](+:255,+:255,+:255),
P[124](-:55,-:100,-:100),
P[125](-:100,-:100,-:100),
P[126](-:100,-:100,-:100);
A[under_streak, 14] <=
P[121](+:91,+:50,+:86),
P[122](+:255,+:255,+:255),
P[123](-:55,-:100,-:100),
P[124](-:100,-:100,-:100),
P[125](-:100,-:100,-:100);
A[under_streak, 15] <=
P[120](+:91,+:50,+:86),
P[121](+:255,+:255,+:255),
P[122](-:55,-:100,-:100),
P[123](-:100,-:100,-:100),
P[124](-:100,-:100,-:100);
A[under_streak, 16] <=
P[119](+:91,+:50,+:86),
P[120](+:255,+:255,+:255),
P[121](-:55,-:100,-:100),
P[122](-:100,-:100,-:100),
P[123](-:100,-:100,-:100);
A[under_streak, 17] <=
P[118](+:91,+:50,+:86),
P[119](+:255,+:255,+:255),
P[120](-:55,-:100,-:100),
P[121](-:100,-:100,-:100),
P[122](-:100,-:100,-:100);
A[under_streak, 18] <=
P[117](+:91,+:50,+:86),
P[118](+:255,+:255,+:255),
P[119](-:55,-:100,-:100),
P[120](-:100,-:100,-:100),
P[121](-:100,-:100,-:100);
A[under_streak, 19] <=
P[116](+:91,+:50,+:86),
P[117](+:255,+:255,+:255),
P[118](-:55,-:100,-:100),
P[119](-:100,-:100,-:100),
P[120](-:100,-:100,-:100);
A[under_streak, 20] <=
P[115](+:91,+:50,+:86),
P[116](+:255,+:255,+:255),
P[117](-:55,-:100,-:100),
P[118](-:100,-:100,-:100),
P[119](-:100,-:100,-:100);
A[under_streak, 21] <=
P[114](+:91,+:50,+:86),
P[115](+:255,+:255,+:255),
P[116](-:55,-:100,-:100),
P[117](-:100,-:100,-:100),
P[118](-:100,-:100,-:100);
A[under_streak, 22] <=
P[113](+:91,+:50,+:86),
P[114](+:255,+:255,+:255),
P[115](-:55,-:100,-:100),
P[116](-:100,-:100,-:100),
P[117](-:100,-:100,-:100);
A[under_streak, 23] <=
P[112](+:91,+:50,+:86),
P[113](+:255,+:255,+:255),
P[114](-:55,-:100,-:100),
P[115](-:100,-:100,-:100),
P[116](-:100,-:100,-:100);
A[under_streak, 24] <=
P[111](+:91,+:50,+:86),
P[112](+:255,+:255,+:255),
P[113](-:55,-:100,-:100),
P[114](-:100,-:100,-:100),
P[115](-:100,-:100,-:100);
A[under_streak, 25] <=
P[110](+:91,+:50,+:86),
P[111](+:255,+:255,+:255),
P[112](-:55,-:100,-:100),
P[113](-:100,-:100,-:100),
P[114](-:100,-:100,-:100);
A[under_streak, 26] <=
P[109](+:91,+:50,+:86),
P[110](+:255,+:255,+:255),
P[111](-:55,-:100,-:100),
P[112](-:100,-:100,-:100),
P[113](-:100,-:100,-:100);
A[under_streak, 27] <=
P[108](+:91,+:50,+:86),
P[109](+:255,+:255,+:255),
P[110](-:55,-:100,-:100),
P[111](-:100,-:100,-:100),
P[112](-:100,-:100,-:100);
A[under_streak, 28] <=
P[107](+:91,+:50,+:86),
P[108](+:255,+:255,+:255),
P[109](-:55,-:100,-:100),
P[110](-:100,-:100,-:100),
P[111](-:100,-:100,-:100);
A[under_streak, 29] <=
P[106](+:91,+:50,+:86),
P[107](+:255,+:255,+:255),
P[108](-:55,-:100,-:100),
P[109](-:100,-:100,-:100),
P[110](-:100,-:100,-:100);
A[under_streak, 30] <=
P[105](+:91,+:50,+:86),
P[106](+:255,+:255,+:255),
P[107](-:55,-:100,-:100),
P[108](-:100,-:100,-:100),
P[109](-:100,-:100,-:100);
A[under_streak, 31] <=
P[104](+:91,+:50,+:86),
P[105](+:255,+:255,+:255),
P[106](-:55,-:100,-:100),
P[107](-:100,-:100,-:100),
P[108](-:100,-:100,-:100);
A[under_streak, 32] <=
P[103](+:91,+:50,+:86),
P[104](+:255,+:255,+:255),
P[105](-:55,-:100,-:100),
P[106](-:100,-:100,-:100),
P[107](-:100,-:100,-:100);
# Fade-out
A[under_streak, 33] <=
P[103](+:255,+:255,+:255),
P[104](-:55,-:100,-:100),
P[105](-:100,-:100,-:100),
P[106](-:200,-:200,-:200);
A[under_streak, 34] <=
P[103](-:55,-:100,-:100),
P[104](-:100,-:100,-:100),
P[105](-:200,-:200,-:200);
A[under_streak, 35] <=
P[103](-:100,-:100,-:100),
P[104](-:200,-:200,-:200);
A[under_streak, 36] <=
P[103](-:200,-:200,-:200);
A[under_streak, 37] <=
P[106](0,0,0);
# Rainbow wave
A[rainbow_wave] <= framedelay:0x3, loop, replace:all, pfunc:interp;
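# Note on the frames below (an assumption about pfunc:interp, not stated in the original):
# positions outside 0%-100% (e.g. c:-24%, c:124%) appear to act as off-matrix interpolation
# endpoints so the gradient can scroll smoothly past both edges of the LED grid.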
A[rainbow_wave, 1] <= P[c:0%] (255,0,0), P[c:25%](255,255,0), P[c:50%](0,255,0), P[c:75%](0,0,255), P[c:100%](127,0,255);
A[rainbow_wave, 2] <= P[c:-24%](127,0,255), P[c:2%] (255,0,0), P[c:27%](255,255,0), P[c:52%](0,255,0), P[c:77%](0,0,255), P[c:102%](127,0,255);
A[rainbow_wave, 3] <= P[c:-22%](127,0,255), P[c:4%] (255,0,0), P[c:29%](255,255,0), P[c:54%](0,255,0), P[c:79%](0,0,255), P[c:104%](127,0,255);
A[rainbow_wave, 4] <= P[c:-20%](127,0,255), P[c:6%] (255,0,0), P[c:31%](255,255,0), P[c:56%](0,255,0), P[c:81%](0,0,255), P[c:106%](127,0,255);
A[rainbow_wave, 5] <= P[c:-18%](127,0,255), P[c:8%] (255,0,0), P[c:33%](255,255,0), P[c:58%](0,255,0), P[c:83%](0,0,255), P[c:108%](127,0,255);
A[rainbow_wave, 6] <= P[c:-16%](127,0,255), P[c:10%](255,0,0), P[c:35%](255,255,0), P[c:60%](0,255,0), P[c:85%](0,0,255), P[c:110%](127,0,255);
A[rainbow_wave, 7] <= P[c:-14%](127,0,255), P[c:12%](255,0,0), P[c:37%](255,255,0), P[c:62%](0,255,0), P[c:87%](0,0,255), P[c:112%](127,0,255);
A[rainbow_wave, 8] <= P[c:-12%](127,0,255), P[c:14%](255,0,0), P[c:39%](255,255,0), P[c:64%](0,255,0), P[c:89%](0,0,255), P[c:114%](127,0,255);
A[rainbow_wave, 9] <= P[c:-10%](127,0,255), P[c:16%](255,0,0), P[c:41%](255,255,0), P[c:66%](0,255,0), P[c:91%](0,0,255), P[c:116%](127,0,255);
A[rainbow_wave, 10] <= P[c:-8%] (127,0,255), P[c:18%](255,0,0), P[c:43%](255,255,0), P[c:68%](0,255,0), P[c:93%](0,0,255), P[c:118%](127,0,255);
A[rainbow_wave, 11] <= P[c:-6%] (127,0,255), P[c:20%](255,0,0), P[c:45%](255,255,0), P[c:70%](0,255,0), P[c:95%](0,0,255), P[c:120%](127,0,255);
A[rainbow_wave, 12] <= P[c:-4%] (127,0,255), P[c:22%](255,0,0), P[c:47%](255,255,0), P[c:72%](0,255,0), P[c:97%](0,0,255), P[c:122%](127,0,255);
A[rainbow_wave, 13] <= P[c:-2%] (127,0,255), P[c:24%](255,0,0), P[c:49%](255,255,0), P[c:74%](0,255,0), P[c:99%](0,0,255), P[c:124%](127,0,255);
A[rainbow_wave, 14] <= P[c:0%] (127,0,255), P[c:25%](255,0,0), P[c:50%](255,255,0), P[c:75%](0,255,0), P[c:100%](0,0,255);
A[rainbow_wave, 15] <= P[c:-24%](0,0,255), P[c:2%] (127,0,255), P[c:27%](255,0,0), P[c:52%](255,255,0), P[c:77%](0,255,0), P[c:102%](0,0,255);
A[rainbow_wave, 16] <= P[c:-22%](0,0,255), P[c:4%] (127,0,255), P[c:29%](255,0,0), P[c:54%](255,255,0), P[c:79%](0,255,0), P[c:104%](0,0,255);
A[rainbow_wave, 17] <= P[c:-20%](0,0,255), P[c:6%] (127,0,255), P[c:31%](255,0,0), P[c:56%](255,255,0), P[c:81%](0,255,0), P[c:106%](0,0,255);
A[rainbow_wave, 18] <= P[c:-18%](0,0,255), P[c:8%] (127,0,255), P[c:33%](255,0,0), P[c:58%](255,255,0), P[c:83%](0,255,0), P[c:108%](0,0,255);
A[rainbow_wave, 19] <= P[c:-16%](0,0,255), P[c:10%](127,0,255), P[c:35%](255,0,0), P[c:60%](255,255,0), P[c:85%](0,255,0), P[c:110%](0,0,255);
A[rainbow_wave, 20] <= P[c:-14%](0,0,255), P[c:12%](127,0,255), P[c:37%](255,0,0), P[c:62%](255,255,0), P[c:87%](0,255,0), P[c:112%](0,0,255);
A[rainbow_wave, 21] <= P[c:-12%](0,0,255), P[c:14%](127,0,255), P[c:39%](255,0,0), P[c:64%](255,255,0), P[c:89%](0,255,0), P[c:114%](0,0,255);
A[rainbow_wave, 22] <= P[c:-10%](0,0,255), P[c:16%](127,0,255), P[c:41%](255,0,0), P[c:66%](255,255,0), P[c:91%](0,255,0), P[c:116%](0,0,255);
A[rainbow_wave, 23] <= P[c:-8%] (0,0,255), P[c:18%](127,0,255), P[c:43%](255,0,0), P[c:68%](255,255,0), P[c:93%](0,255,0), P[c:118%](0,0,255);
A[rainbow_wave, 24] <= P[c:-6%] (0,0,255), P[c:20%](127,0,255), P[c:45%](255,0,0), P[c:70%](255,255,0), P[c:95%](0,255,0), P[c:120%](0,0,255);
A[rainbow_wave, 25] <= P[c:-4%] (0,0,255), P[c:22%](127,0,255), P[c:47%](255,0,0), P[c:72%](255,255,0), P[c:97%](0,255,0), P[c:122%](0,0,255);
A[rainbow_wave, 26] <= P[c:-2%] (0,0,255), P[c:24%](127,0,255), P[c:49%](255,0,0), P[c:74%](255,255,0), P[c:99%](0,255,0), P[c:124%](0,0,255);
A[rainbow_wave, 28] <= P[c:0%] (0,0,255), P[c:25%](127,0,255), P[c:50%](255,0,0), P[c:75%](255,255,0), P[c:100%](0,255,0);
A[rainbow_wave, 29] <= P[c:-24%](0,255,0), P[c:2%] (0,0,255), P[c:27%](127,0,255), P[c:52%](255,0,0), P[c:77%](255,255,0), P[c:102%](0,255,0);
A[rainbow_wave, 30] <= P[c:-22%](0,255,0), P[c:4%] (0,0,255), P[c:29%](127,0,255), P[c:54%](255,0,0), P[c:79%](255,255,0), P[c:104%](0,255,0);
A[rainbow_wave, 31] <= P[c:-20%](0,255,0), P[c:6%] (0,0,255), P[c:31%](127,0,255), P[c:56%](255,0,0), P[c:81%](255,255,0), P[c:106%](0,255,0);
A[rainbow_wave, 32] <= P[c:-18%](0,255,0), P[c:8%] (0,0,255), P[c:33%](127,0,255), P[c:58%](255,0,0), P[c:83%](255,255,0), P[c:108%](0,255,0);
A[rainbow_wave, 33] <= P[c:-16%](0,255,0), P[c:10%](0,0,255), P[c:35%](127,0,255), P[c:60%](255,0,0), P[c:85%](255,255,0), P[c:110%](0,255,0);
A[rainbow_wave, 34] <= P[c:-14%](0,255,0), P[c:12%](0,0,255), P[c:37%](127,0,255), P[c:62%](255,0,0), P[c:87%](255,255,0), P[c:112%](0,255,0);
A[rainbow_wave, 35] <= P[c:-12%](0,255,0), P[c:14%](0,0,255), P[c:39%](127,0,255), P[c:64%](255,0,0), P[c:89%](255,255,0), P[c:114%](0,255,0);
A[rainbow_wave, 36] <= P[c:-10%](0,255,0), P[c:16%](0,0,255), P[c:41%](127,0,255), P[c:66%](255,0,0), P[c:91%](255,255,0), P[c:116%](0,255,0);
A[rainbow_wave, 37] <= P[c:-8%] (0,255,0), P[c:18%](0,0,255), P[c:43%](127,0,255), P[c:68%](255,0,0), P[c:93%](255,255,0), P[c:118%](0,255,0);
A[rainbow_wave, 38] <= P[c:-6%] (0,255,0), P[c:20%](0,0,255), P[c:45%](127,0,255), P[c:70%](255,0,0), P[c:95%](255,255,0), P[c:120%](0,255,0);
A[rainbow_wave, 39] <= P[c:-4%] (0,255,0), P[c:22%](0,0,255), P[c:47%](127,0,255), P[c:72%](255,0,0), P[c:97%](255,255,0), P[c:122%](0,255,0);
A[rainbow_wave, 40] <= P[c:-2%] (0,255,0), P[c:24%](0,0,255), P[c:49%](127,0,255), P[c:74%](255,0,0), P[c:99%](255,255,0), P[c:124%](0,255,0);
A[rainbow_wave, 41] <= P[c:0%] (0,255,0), P[c:25%](0,0,255), P[c:50%](127,0,255), P[c:75%](255,0,0), P[c:100%](255,255,0);
A[rainbow_wave, 42] <= P[c:-24%](255,255,0), P[c:2%] (0,255,0), P[c:27%](0,0,255), P[c:52%](127,0,255), P[c:77%](255,0,0), P[c:102%](255,255,0);
A[rainbow_wave, 43] <= P[c:-22%](255,255,0), P[c:4%] (0,255,0), P[c:29%](0,0,255), P[c:54%](127,0,255), P[c:79%](255,0,0), P[c:104%](255,255,0);
A[rainbow_wave, 44] <= P[c:-20%](255,255,0), P[c:6%] (0,255,0), P[c:31%](0,0,255), P[c:56%](127,0,255), P[c:81%](255,0,0), P[c:106%](255,255,0);
A[rainbow_wave, 45] <= P[c:-18%](255,255,0), P[c:8%] (0,255,0), P[c:33%](0,0,255), P[c:58%](127,0,255), P[c:83%](255,0,0), P[c:108%](255,255,0);
A[rainbow_wave, 46] <= P[c:-16%](255,255,0), P[c:10%](0,255,0), P[c:35%](0,0,255), P[c:60%](127,0,255), P[c:85%](255,0,0), P[c:110%](255,255,0);
A[rainbow_wave, 47] <= P[c:-14%](255,255,0), P[c:12%](0,255,0), P[c:37%](0,0,255), P[c:62%](127,0,255), P[c:87%](255,0,0), P[c:112%](255,255,0);
A[rainbow_wave, 48] <= P[c:-12%](255,255,0), P[c:14%](0,255,0), P[c:39%](0,0,255), P[c:64%](127,0,255), P[c:89%](255,0,0), P[c:114%](255,255,0);
A[rainbow_wave, 49] <= P[c:-10%](255,255,0), P[c:16%](0,255,0), P[c:41%](0,0,255), P[c:66%](127,0,255), P[c:91%](255,0,0), P[c:116%](255,255,0);
A[rainbow_wave, 50] <= P[c:-8%] (255,255,0), P[c:18%](0,255,0), P[c:43%](0,0,255), P[c:68%](127,0,255), P[c:93%](255,0,0), P[c:118%](255,255,0);
A[rainbow_wave, 51] <= P[c:-6%] (255,255,0), P[c:20%](0,255,0), P[c:45%](0,0,255), P[c:70%](127,0,255), P[c:95%](255,0,0), P[c:120%](255,255,0);
A[rainbow_wave, 52] <= P[c:-4%] (255,255,0), P[c:22%](0,255,0), P[c:47%](0,0,255), P[c:72%](127,0,255), P[c:97%](255,0,0), P[c:122%](255,255,0);
A[rainbow_wave, 53] <= P[c:-2%] (255,255,0), P[c:24%](0,255,0), P[c:49%](0,0,255), P[c:74%](127,0,255), P[c:99%](255,0,0), P[c:124%](255,255,0);
A[rainbow_wave, 54] <= P[c:0%] (255,255,0), P[c:25%](0,255,0), P[c:50%](0,0,255), P[c:75%](127,0,255), P[c:100%](255,0,0);
A[rainbow_wave, 55] <= P[c:-24%](255,0,0), P[c:2%] (255,255,0), P[c:27%](0,255,0), P[c:52%](0,0,255), P[c:77%](127,0,255), P[c:102%](255,0,0);
A[rainbow_wave, 56] <= P[c:-22%](255,0,0), P[c:4%] (255,255,0), P[c:29%](0,255,0), P[c:54%](0,0,255), P[c:79%](127,0,255), P[c:104%](255,0,0);
A[rainbow_wave, 57] <= P[c:-20%](255,0,0), P[c:6%] (255,255,0), P[c:31%](0,255,0), P[c:56%](0,0,255), P[c:81%](127,0,255), P[c:106%](255,0,0);
A[rainbow_wave, 58] <= P[c:-18%](255,0,0), P[c:8%] (255,255,0), P[c:33%](0,255,0), P[c:58%](0,0,255), P[c:83%](127,0,255), P[c:108%](255,0,0);
A[rainbow_wave, 59] <= P[c:-16%](255,0,0), P[c:10%](255,255,0), P[c:35%](0,255,0), P[c:60%](0,0,255), P[c:85%](127,0,255), P[c:110%](255,0,0);
A[rainbow_wave, 60] <= P[c:-14%](255,0,0), P[c:12%](255,255,0), P[c:37%](0,255,0), P[c:62%](0,0,255), P[c:87%](127,0,255), P[c:112%](255,0,0);
A[rainbow_wave, 61] <= P[c:-12%](255,0,0), P[c:14%](255,255,0), P[c:39%](0,255,0), P[c:64%](0,0,255), P[c:89%](127,0,255), P[c:114%](255,0,0);
A[rainbow_wave, 62] <= P[c:-10%](255,0,0), P[c:16%](255,255,0), P[c:41%](0,255,0), P[c:66%](0,0,255), P[c:91%](127,0,255), P[c:116%](255,0,0);
A[rainbow_wave, 63] <= P[c:-8%] (255,0,0), P[c:18%](255,255,0), P[c:43%](0,255,0), P[c:68%](0,0,255), P[c:93%](127,0,255), P[c:118%](255,0,0);
A[rainbow_wave, 64] <= P[c:-6%] (255,0,0), P[c:20%](255,255,0), P[c:45%](0,255,0), P[c:70%](0,0,255), P[c:95%](127,0,255), P[c:120%](255,0,0);
A[rainbow_wave, 65] <= P[c:-4%] (255,0,0), P[c:22%](255,255,0), P[c:47%](0,255,0), P[c:72%](0,0,255), P[c:97%](127,0,255), P[c:122%](255,0,0);
A[rainbow_wave, 66] <= P[c:-2%] (255,0,0), P[c:24%](255,255,0), P[c:49%](0,255,0), P[c:74%](0,0,255), P[c:99%](127,0,255), P[c:124%](255,0,0);
# Start/stop wave
U["Print"] :+ A[rainbow_wave](start);
U["Pause"] :+ A[rainbow_wave](start, loops:1, framedelay:0);
# Full-brightness white
A[all_white] <= pfunc:interp;
A[all_white, 1] <= P[c:0%](255,255,255), P[c:100%](255,255,255);
#U["Space"] :+ animation_control(0);
#U["Right"] :+ animation_control(1);
# Massdrop blue to pink (miami)
A[blue_to_pink] <= pfunc:interp;
A[blue_to_pink, 1] <= P[c:0%](0,0xAE,0xDA), P[c:100%](0xFF,0x14,0x93); # FF1493
# Color wheel
A[color_wheel] <= pfunc:interp;
A[color_wheel, 1] <= P[c:0%](0,0xAE,0xDA), P[c:100%](0xFF,0xDA,0xE9);
# Keys only color rotation
A[keys_only_rotation] <= framedelay:0xF, loop, replace:all, pfunc:interp;
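# Sketch of how the frames below are read (assumption): each frame sets scancodes 0x01 and
# 0x5F to the same colour, and pfunc:interp is expected to fill every key in between.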
A[keys_only_rotation, 1] <= S0x01(0xC1, 0x2D, 0x1D), S0x5F(0xC1, 0x2D, 0x1D);
A[keys_only_rotation, 2] <= S0x01(0xEE, 0x6D, 0x28), S0x5F(0xEE, 0x6D, 0x28);
A[keys_only_rotation, 3] <= S0x01(0xE0, 0x9E, 0x3B), S0x5F(0xE0, 0x9E, 0x3B);
A[keys_only_rotation, 4] <= S0x01(0xE5, 0xC9, 0x43), S0x5F(0xE5, 0xC9, 0x43);
A[keys_only_rotation, 5] <= S0x01(0x1E, 0xB8, 0x6D), S0x5F(0x1E, 0xB8, 0x6D);
A[keys_only_rotation, 6] <= S0x01(0x00, 0xB3, 0xA6), S0x5F(0x00, 0xB3, 0xA6);
A[keys_only_rotation, 7] <= S0x01(0x20, 0x82, 0xC6), S0x5F(0x20, 0x82, 0xC6);
A[keys_only_rotation, 8] <= S0x01(0x43, 0x54, 0xC1), S0x5F(0x43, 0x54, 0xC1);
A[keys_only_rotation, 9] <= S0x01(0x71, 0x1C, 0x9E), S0x5F(0x71, 0x1C, 0x9E);
A[keys_only_rotation, 10] <= S0x01(0xCD, 0x3B, 0x70), S0x5F(0xCD, 0x3B, 0x70);
A[keys_only_rotation, 11] <= S0x01(0xB8, 0x34, 0x3E), S0x5F(0xB8, 0x34, 0x3E);
# Underlighting only color rotation
A[underlighting_only_rotation] <= framedelay:0xF, loop, replace:all, pfunc:interp;
A[underlighting_only_rotation, 1] <= P[88](0xC1, 0x2D, 0x1D), P[128](0xC1, 0x2D, 0x1D);
A[underlighting_only_rotation, 2] <= P[88](0xEE, 0x6D, 0x28), P[128](0xEE, 0x6D, 0x28);
A[underlighting_only_rotation, 3] <= P[88](0xE0, 0x9E, 0x3B), P[128](0xE0, 0x9E, 0x3B);
A[underlighting_only_rotation, 4] <= P[88](0xE5, 0xC9, 0x43), P[128](0xE5, 0xC9, 0x43);
A[underlighting_only_rotation, 5] <= P[88](0x1E, 0xB8, 0x6D), P[128](0x1E, 0xB8, 0x6D);
A[underlighting_only_rotation, 6] <= P[88](0x00, 0xB3, 0xA6), P[128](0x00, 0xB3, 0xA6);
A[underlighting_only_rotation, 7] <= P[88](0x20, 0x82, 0xC6), P[128](0x20, 0x82, 0xC6);
A[underlighting_only_rotation, 8] <= P[88](0x43, 0x54, 0xC1), P[128](0x43, 0x54, 0xC1);
A[underlighting_only_rotation, 9] <= P[88](0x71, 0x1C, 0x9E), P[128](0x71, 0x1C, 0x9E);
A[underlighting_only_rotation, 10] <= P[88](0xCD, 0x3B, 0x70), P[128](0xCD, 0x3B, 0x70);
A[underlighting_only_rotation, 11] <= P[88](0xB8, 0x34, 0x3E), P[128](0xB8, 0x34, 0x3E);
U"F1" :+ A[keys_only_rotation](start, framedelay:0x1F);
U"F2" :+ A[keys_only_rotation](start);
U"F3" :+ A[keys_only_rotation](start, framedelay:1);
U"F4" :+ A[keys_only_rotation](pause);
U"F5" :+ A[underlighting_only_rotation](start, framedelay:0x1F);
U"F6" :+ A[underlighting_only_rotation](start);
U"F7" :+ A[underlighting_only_rotation](start, framedelay:1);
U"F8" :+ A[underlighting_only_rotation](pause);
U"Esc" + U"F1" : flashMode();
# kll/layouts/colemak.kll
Name = colemak;
Version = 0.1;
Author = "HaaTa (Jacob Alexander) 2014";
KLL = 0.3;
# Modified Date
Date = 2014-09-07;
# Top Row
'e' : 'f';
'r' : 'p';
't' : 'g';
'y' : 'j';
'u' : 'l';
'i' : 'u';
'o' : 'y';
'p' : ';';
# Middle Row
's' : 'r';
'd' : 's';
'f' : 't';
'g' : 'd';
'j' : 'n';
'k' : 'e';
'l' : 'i';
';' : 'o';
# Bottom Row
'n' : 'k';
# kll/layouts/funMacros.kll
Name = funMacros;
Version = 0.1;
Author = "HaaTa (Jacob Alexander) 2014";
KLL = 0.3;
# Modified Date
Date = 2014-10-14;
# TODO
#U"Function1" : layerShift( 1 );
U"Function2" : '!haata', U"Enter";
U"Function3" : '!w', U"Enter";
#U"Function4" : layerShift( 4 );
#U"Function5" : layerShift( 5 );
#U"Function6" : layerShift( 6 );
#U"Function7" : layerShift( 7 );
#U"Function8" : layerShift( 8 );
#U"Function9" : layerShift( 9 );
#U"Function10" : layerShift( 10 );
#U"Function11" : layerShift( 11 );
#U"Function12" : layerShift( 12 );
#U"Function13" : layerShift( 13 );
#U"Function14" : layerShift( 14 );
#U"Function15" : layerShift( 15 );
#U"Function16" : layerShift( 16 );
# kll/layouts/hhkbpro2.kll
Name = hhkbpro2;
Version = 0.2;
Author = "HaaTa (Jacob Alexander) 2014";
KLL = 0.3c;
# Modified Date
Date = 2015-10-12;
# Number Row
U"1" : U"F1";
U"2" : U"F2";
U"3" : U"F3";
U"4" : U"F4";
U"5" : U"F5";
U"6" : U"F6";
U"7" : U"F7";
U"8" : U"F8";
U"9" : U"F9";
U"0" : U"F10";
U"-" : U"F11";
U"=" : U"F12";
U"Backslash" : U"Insert";
U"Backtick" : U"Delete";
# Top Row
U"Tab" : U"Capslock";
U"W" : flashMode();
U"I" : U"PrintScreen";
U"O" : U"ScrollLock";
U"P" : U"Pause";
U"[" : U"Up";
# Middle Row
U"A" : CONS"VolumeDown";
U"S" : CONS"VolumeUp";
U"D" : CONS"Mute";
U"F" : CONS"Eject";
U"H" : U"Keypad Asterisk";
U"J" : U"Keypad Slash";
U"K" : U"Home";
U"L" : U"PageUp";
U";" : U"Left";
U"Quote" : U"Right";
# Bottom Row
U"N" : U"Keypad Plus";
U"M" : U"Keypad Minus";
U"Comma" : U"End";
U"Period" : U"PageDown";
U"Slash" : U"Down";
# Space Row
# N/A
# kll/layouts/hhkbpro2_shiftLeft.kll
Name = hhkbpro2_shiftLeft;
Version = 0.1;
Author = "HaaTa (Jacob Alexander) 2014";
KLL = 0.3;
# Same as hhkbpro2 but accommodates a right FN key that is further to the left
# This means that all the keys meant to be pressed together with FN (same hand) are shifted left 1 key
# Modified Date
Date = 2014-10-02;
# Number Row
U"1" : U"F1";
U"2" : U"F2";
U"3" : U"F3";
U"4" : U"F4";
U"5" : U"F5";
U"6" : U"F6";
U"7" : U"F7";
U"8" : U"F8";
U"9" : U"F9";
U"0" : U"F10";
U"-" : U"F11";
U"=" : U"F12";
U"Backslash" : U"Insert";
U"Backtick" : U"Delete";
# Top Row
U"Tab" : U"Capslock";
U"U" : U"PrintScreen";
U"I" : U"ScrollLock";
U"O" : U"Pause";
U"P" : U"Up";
# Middle Row
U"A" : U"VolumeDown";
U"S" : U"VolumeUp";
U"D" : U"Mute";
#U"F" : U"Eject"; # TODO Requires additional firmware support for media keys -HaaTa
U"G" : U"Keypad Asterisk";
U"H" : U"Keypad Slash";
U"J" : U"Home";
U"K" : U"PageUp";
U"L" : U"Left";
U";" : U"Right";
# Bottom Row
U"B" : U"Keypad Plus";
U"N" : U"Keypad Minus";
U"M" : U"End";
U"Comma" : U"PageDown";
U"Period" : U"Down";
# Space Row
# N/A
# kll/layouts/hhkbpro2_slim.kll
Name = hhkbpro2_slim;
Version = 0.1;
Author = "HaaTa (Jacob Alexander) 2014";
KLL = 0.3;
# Modified Date
Date = 2014-09-16;
# Number Row
U"1" : U"F1";
U"2" : U"F2";
U"3" : U"F3";
U"4" : U"F4";
U"5" : U"F5";
U"6" : U"F6";
U"7" : U"F7";
U"8" : U"F8";
U"9" : U"F9";
U"0" : U"F10";
U"-" : U"F11";
U"=" : U"F12";
U"Backslash" : U"Insert";
U"Backtick" : U"Delete";
# Top Row
U"Tab" : U"Capslock";
U"[" : U"Up";
# Middle Row
U"K" : U"Home";
U"L" : U"PageUp";
U";" : U"Left";
U"Quote" : U"Right";
# Bottom Row
U"N" : U"Keypad Plus";
U"M" : U"Keypad Minus";
U"Comma" : U"End";
U"Period" : U"PageDown";
U"Slash" : U"Down";
# Space Row
# N/A
# kll/layouts/kishsaver_unix1.kll
Name = kishsaver_unix1;
Version = 0.1;
Author = "HaaTa (Jacob Alexander) 2014";
KLL = 0.3;
# Modified Date
Date = 2014-09-16;
# Row 1
U"Backtick" : U"Esc";
U"Inter2" : U"Backtick";
U"Backspace" : U"Delete";
# Row 2
U"Delete" : U"Backspace";
# Row 3
U"CapsLock" : U"Ctrl";
# Row 4
U"Inter3" : U"RShift";
U"RShift" : U"Function1";
# Row 5
U"LCtrl" : U"Function2";
U"LGui" : U"LAlt";
U"LAlt" : U"LGui";
U"RAlt" : U"RGui";
U"RGui" : U"RAlt";
U"RCtrl" : U"Function3";
# kll/layouts/kishsaver_unix2.kll
Name = kishsaver_unix2;
Version = 0.1;
Author = "HaaTa (Jacob Alexander) 2015";
KLL = 0.3a;
# Kishsaver UNIX Style with ANSI Enter
# Modified Date
Date = 2015-01-15;
# Row 1
U"Backtick" : U"Esc";
U"Inter2" : U"Backtick";
U"Backspace" : U"Delete";
# Row 2
U"Delete" : U"Backspace";
# Row 3
U"CapsLock" : U"Ctrl";
U"Backslash" : U"Enter";
# Row 4
U"Inter3" : U"RShift";
U"RShift" : U"Function1";
# Row 5
U"LCtrl" : U"Function2";
U"LGui" : U"LAlt";
U"LAlt" : U"LGui";
U"RAlt" : U"RGui";
U"RGui" : U"RAlt";
U"RCtrl" : U"Function3";
# kll/layouts/klltest.kll
# klltest - Layout used for unit tests in Keyboards/Testing/klltest.bash
Name = klltest;
Version = 0.3;
Author = "HaaTa (Jacob Alexander) 2018";
KLL = 0.5;
# Modified Date
Date = 2018-04-21;
# Trigger Tests
U"A" + U"B" : U"C";
'cab' : U"D";
'qq' : U"E";
U"E", U"F" : U"G";
# Result Tests
U"G" : U"A" + U"B";
U"H" : 'ijk';
U"O" : 'ioo';
U"P" : U"Q", U"R";
# Layer Tests (new syntax)
U"I" : Layer[1];
U"T" : LayerShift[1];
U"J" : LayerLatch[2];
U"K" : LayerLock[3];
U"L" : Layer[10]; # Supposed to be invalid
# Consumer Control
U"M" : CONS"Eject";
# System Control
U"N" : SYS"Sleep";
# Animation Test
U"Q" : A[testanimation](start);
U"R" : A[blue_fill_interp](start);
U"S" : A[testanimation](start) + A[blue_fill_interp](start);
# Indicator Test
I"NumLock" : testThreadSafe();
I"CapsLock" : testThreadSafe();
I"ScrollLock" : testThreadSafe();
I"Compose" : testThreadSafe();
I"Kana" : testThreadSafe();
# Layer Trigger Tests
Layer[1] : testThreadSafe();
LayerShift[1] : testThreadUnsafe();
LayerLock[1] : testThreadSafe();
LayerLatch[1] : testThreadSafe();
# kll/layouts/ktype_demo.kll
Name = ktype demo;
Version = 0.2;
Author = "HaaTa (Jacob Alexander) 2016-2017";
KLL = 0.3d;
# Modified Date
Date = 2017-01-17;
# Toggle Rainbow
# TODO
#U"Menu" + U"F12" : rainbowToggle();
# Reactive Rainbow
# Single shot rainbow
# On each keypress flash one loop of rainbow
# TODO
# kll/layouts/lcdFuncMap.kll
Name = lcdFuncMap;
Version = 0.3;
Author = "HaaTa (Jacob Alexander) 2015";
KLL = 0.3c;
# Modified Date
Date = 2015-09-29;
# Maps each Function key incrementally to each layer
# Unused layers and functions are ignored
U"Function1" :: layerShift( 1 ) + LCDLayerDisplay();
U"Function2" :: layerShift( 2 ) + LCDLayerDisplay();
U"Function3" :: layerShift( 3 ) + LCDLayerDisplay();
U"Function4" :: layerShift( 4 ) + LCDLayerDisplay();
U"Function5" :: layerShift( 5 ) + LCDLayerDisplay();
U"Function6" :: layerShift( 6 ) + LCDLayerDisplay();
U"Function7" :: layerShift( 7 ) + LCDLayerDisplay();
U"Function8" :: layerShift( 8 ) + LCDLayerDisplay();
U"Function9" :: layerShift( 9 ) + LCDLayerDisplay();
U"Function10" :: layerShift( 10 ) + LCDLayerDisplay();
U"Function11" :: layerShift( 11 ) + LCDLayerDisplay();
U"Function12" :: layerShift( 12 ) + LCDLayerDisplay();
U"Function13" :: layerShift( 13 ) + LCDLayerDisplay();
U"Function14" :: layerShift( 14 ) + LCDLayerDisplay();
U"Function15" :: layerShift( 15 ) + LCDLayerDisplay();
U"Function16" :: layerShift( 16 ) + LCDLayerDisplay();
U"Lock1" :: layerLock( 1 ) + LCDLayerDisplay();
U"Lock2" :: layerLock( 2 ) + LCDLayerDisplay();
U"Lock3" :: layerLock( 3 ) + LCDLayerDisplay();
U"Lock4" :: layerLock( 4 ) + LCDLayerDisplay();
U"Lock5" :: layerLock( 5 ) + LCDLayerDisplay();
U"Lock6" :: layerLock( 6 ) + LCDLayerDisplay();
U"Lock7" :: layerLock( 7 ) + LCDLayerDisplay();
U"Lock8" :: layerLock( 8 ) + LCDLayerDisplay();
U"Lock9" :: layerLock( 9 ) + LCDLayerDisplay();
U"Lock10" :: layerLock( 10 ) + LCDLayerDisplay();
U"Lock11" :: layerLock( 11 ) + LCDLayerDisplay();
U"Lock12" :: layerLock( 12 ) + LCDLayerDisplay();
U"Lock13" :: layerLock( 13 ) + LCDLayerDisplay();
U"Lock14" :: layerLock( 14 ) + LCDLayerDisplay();
U"Lock15" :: layerLock( 15 ) + LCDLayerDisplay();
U"Lock16" :: layerLock( 16 ) + LCDLayerDisplay();
U"Latch1" :: layerLatch( 1 ) + LCDLayerDisplay();
U"Latch2" :: layerLatch( 2 ) + LCDLayerDisplay();
U"Latch3" :: layerLatch( 3 ) + LCDLayerDisplay();
U"Latch4" :: layerLatch( 4 ) + LCDLayerDisplay();
U"Latch5" :: layerLatch( 5 ) + LCDLayerDisplay();
U"Latch6" :: layerLatch( 6 ) + LCDLayerDisplay();
U"Latch7" :: layerLatch( 7 ) + LCDLayerDisplay();
U"Latch8" :: layerLatch( 8 ) + LCDLayerDisplay();
U"Latch9" :: layerLatch( 9 ) + LCDLayerDisplay();
U"Latch10" :: layerLatch( 10 ) + LCDLayerDisplay();
U"Latch11" :: layerLatch( 11 ) + LCDLayerDisplay();
U"Latch12" :: layerLatch( 12 ) + LCDLayerDisplay();
U"Latch13" :: layerLatch( 13 ) + LCDLayerDisplay();
U"Latch14" :: layerLatch( 14 ) + LCDLayerDisplay();
U"Latch15" :: layerLatch( 15 ) + LCDLayerDisplay();
U"Latch16" :: layerLatch( 16 ) + LCDLayerDisplay();
# Layer rotation
U"Next Layer" :: layerRotate( 0 ) + LCDLayerDisplay(); # 0 is Next
U"Prev Layer" :: layerRotate( 1 ) + LCDLayerDisplay(); # 1 is Previous
# Colours assigned to each of the LCD numbers
# The "top of stack" layer is the colour used
STLcdNumber0Color => STLcdNumber0Color_define;
STLcdNumber1Color => STLcdNumber1Color_define;
STLcdNumber2Color => STLcdNumber2Color_define;
STLcdNumber3Color => STLcdNumber3Color_define;
STLcdNumber4Color => STLcdNumber4Color_define;
STLcdNumber5Color => STLcdNumber5Color_define;
STLcdNumber6Color => STLcdNumber6Color_define;
STLcdNumber7Color => STLcdNumber7Color_define;
STLcdNumber8Color => STLcdNumber8Color_define;
STLcdNumber9Color => STLcdNumber9Color_define;
# Brightness reduced to 75% and converted to 16-bit colour
# <8-bit>/0xFF * 0xFFFF * 0.75
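# Worked example of the formula above, using the red channel of #E0342A:
#   0xE0/0xFF * 0xFFFF * 0.75 = 224/255 * 65535 * 0.75 ~ 43176 = 0xA8A8
# which matches the first component of STLcdNumber1Color below.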
STLcdNumber1Color = "0xA8A8, 0x2727, 0x1FA0"; # Taylor Swift's Lips #E0342A
STLcdNumber2Color = "0x4B4B, 0x8D8D, 0x34B5"; # Matrix Green #64BC46
STLcdNumber3Color = "0x0000, 0x8242, 0xB3F3"; # Smart Water Blue #00ADEF
STLcdNumber4Color = "0xF6F6, 0xA5A5, 0x4949"; # Fanta Orange #F6A549 (boosted to 100%)
STLcdNumber5Color = "0xB7B7, 0x5D5D, 0x8545"; # Nicki Minaj Pink #F47CB1
STLcdNumber6Color = "0xBCFC, 0xB6F6, 0x2D2D"; # Iowa Corn Yellow #FBDF3C
STLcdNumber7Color = "0x00C1, 0x7E7E, 0x3C3C"; # Sprite Green #01A850
STLcdNumber8Color = "0x8303, 0x1394, 0xB9F9"; # Fanta Purple #AE1AF7
STLcdNumber9Color = "0x09CA, 0x12D3, 0x8484"; # Red Bull Blue #0D19B0
STLcdNumber0Color = "0xB939, 0xAAEA, 0x8D8D"; # Butter Cow Off-white #F6E3BC
# Basic 32x32 numbers used to display layer stack
# Up to 4 of these numbers can be shown at a time on the LCD
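# Data layout note: each digit below is 4 rows of 32 bytes (128 bytes), i.e. a 32x32
# monochrome bitmap with each byte packing 8 vertical pixels (bit ordering assumed from
# typical ST LCD page addressing; not specified in the original).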
STLcdNumber0 => STLcdNumber0_define;
STLcdNumber1 => STLcdNumber1_define;
STLcdNumber2 => STLcdNumber2_define;
STLcdNumber3 => STLcdNumber3_define;
STLcdNumber4 => STLcdNumber4_define;
STLcdNumber5 => STLcdNumber5_define;
STLcdNumber6 => STLcdNumber6_define;
STLcdNumber7 => STLcdNumber7_define;
STLcdNumber8 => STLcdNumber8_define;
STLcdNumber9 => STLcdNumber9_define;
STLcdNumber0 = "
0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00,
";
STLcdNumber1 = "
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
";
STLcdNumber2 = "
0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00,
";
STLcdNumber3 = "
0x00, 0x00, 0x00, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00,
";
STLcdNumber4 = "
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00,
";
STLcdNumber5 = "
0x00, 0x00, 0x00, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0x00, 0x00, 0x00,
";
STLcdNumber6 = "
0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0x00, 0x00, 0x00,
";
STLcdNumber7 = "
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00,
";
STLcdNumber8 = "
0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00,
";
STLcdNumber9 = "
0x00, 0x00, 0x00, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00,
";
# kll/layouts/md1Action.kll
Name = md1Overlay;
Version = 0.1;
Author = "HaaTa (Jacob Alexander) 2015";
KLL = 0.3c;
# Modified Date
Date = 2015-08-16;
# Example of blocking ESC (0x29) when holding either Shift key
# Currently, kll only supports numbers (this may be fixed in a future kll spec)
U["LShift", "RShift"] : blockHold( 0x29 );
U"Esc" : blockKey( 0x29 );
# Now that Esc is blocked, it's possible to use this macro
U["LShift", "RShift"] + U"Esc" : '~';
U"Function2" : layerLock( 1 );
U"Function3" : action1();
U"Function4" : U"CapsLock";
# kll/layouts/md1Overlay.kll
Name = md1Overlay;
Version = 0.1;
Author = "HaaTa (Jacob Alexander) 2014";
KLL = 0.3;
# Modified Date
Date = 2014-11-12;
U"Function2" : layerLock( 1 );
U"Function3" : layerLatch( 1 );
U"Function4" : U"CapsLock";
# kll/layouts/mouseTest.kll
# Mouse Test
Name = mouseTest;
Version = 0.3;
Author = "HaaTa (Jacob Alexander) 2016-2018";
KLL = 0.5;
# Modified Date
Date = 2018-04-08;
# mouseOut
# Arg1, button, 1-16
# Arg2, mouse x relative axis -32767 to 32767
# Arg3, mouse y relative axis -32767 to 32767
U"1" : mouseOut( 1, 0, 0 );
U"2" : mouseOut( 2, 0, 0 );
U"3" : mouseOut( 3, 0, 0 );
U"4" : mouseOut( 4, 0, 0 );
U"5" : mouseOut( 5, 0, 0 );
U"6" : mouseOut( 6, 0, 0 );
U"7" : mouseOut( 7, 0, 0 );
U"8" : mouseOut( 8, 0, 0 );
U"Z" : mouseOut( 9, 0, 0 );
U"X" : mouseOut( 10, 0, 0 );
U"C" : mouseOut( 11, 0, 0 );
U"V" : mouseOut( 12, 0, 0 );
U"B" : mouseOut( 13, 0, 0 );
U"N" : mouseOut( 14, 0, 0 );
U"M" : mouseOut( 15, 0, 0 );
U"Comma" : mouseOut( 16, 0, 0 );
U"Up" : mouseOut( 0, 0, 1 );
U"Down" : mouseOut( 0, 0, -1 );
U"Left" : mouseOut( 0, -1, 0 );
U"Right" : mouseOut( 0, 1, 0 );
U"W" : mouseOut( 0, 0, 5 );
U"S" : mouseOut( 0, 0, -5 );
U"A" : mouseOut( 0, -5, 0 );
U"D" : mouseOut( 0, 5, 0 );
# mouseWheelOut
# Arg1, vertical wheel, -127 to 127
# Arg2, horizontal wheel, -127 to 127
U"=" : mouseWheelOut( 1, 0 );
U"-" : mouseWheelOut( -1, 0 );
U"]" : mouseWheelOut( 0, 1 );
U"[" : mouseWheelOut( 0, -1 );
# kll/layouts/programmers_dvorak_default.kll
Name = programmers dvorak default layer;
Version = 0.1;
Author = "HaaTa (Jacob Alexander) 2016";
KLL = 0.3d;
# Modified Date
Date = 2016-08-06;
# This is the default layer for programmers dvorak
# http://www.kaufmann.no/roland/dvorak/
#
# CAVEATS:
# 1) Assumes US ANSI locale set on the host OS
# 2) No AltGr layer (future enhancement, will require a different locale)
# 3) Some shortcuts may not behave correctly
# Shift Keys are masked in some situations
#
# USAGE:
# This layer must be set on the defaultMap
# programmers_dvorak_shift must be set as Layer1 (first PartialMap)
# Attempt to force the host OS to use US ANSI
# (Not guaranteed)
keyboardLocale = 33;
### Mapping ###
# Top Row
U"BackTick" : '$';
U"1" : '&';
U"2" : '[';
U"3" : '{';
U"4" : '}';
U"5" : '(';
U"6" : '=';
U"7" : '*';
U"8" : ')';
U"9" : '+';
U"0" : ']';
U"-" : '!';
U"=" : '#';
# Top-Middle Row
U"Q" : ';';
U"W" : ',';
U"E" : '.';
U"R" : 'p';
U"T" : 'y';
U"Y" : 'f';
U"U" : 'g';
U"I" : 'c';
U"O" : 'r';
U"P" : 'l';
U"[" : '/';
U"]" : '@';
### / is in the same place
# Middle Row
### A is in the same place
U"S" : 'o';
U"D" : 'e';
U"F" : 'u';
U"G" : 'i';
U"H" : 'd';
U"J" : 'h';
U"K" : 't';
U"L" : 'n';
U"Semicolon" : 's';
U"Quote" : '-';
# Bottom-Middle Row
U"LShift" :+ U"Function1"; # Do not replace LShift, just trigger Function1 in addition to LShift
### XXX Not sure what the square key is (-HaaTa)
U"Z" : U"Quote"; # ' is hard to do using the single quote syntax
U"X" : 'q';
U"C" : 'j';
U"V" : 'k';
U"B" : 'x';
### M is in the same place
U"N" : 'b';
U"Comma" : 'w';
#U"Period" : 'v';
U"Slash" : 'z';
U"RShift" :+ U"Function1"; # Do not replace RShift, just trigger Function1 in addition to RShift
# Bottom Row
# N/A
# kll/layouts/programmers_dvorak_shift.kll
Name = programmers dvorak shift layer;
Version = 0.1;
Author = "HaaTa (Jacob Alexander) 2016";
KLL = 0.3d;
#
# NOTE: See programmers_dvorak_default.kll for more details on how to use this layout
# It relies on the semantics of the US ANSI layout
#
# This file relies heavily on the blockKey capability for LShift (0xE1) and RShift (0xE5)
# However, it is only used in the cases where it's necessary
#
# We also rely on "fall-through" to the previous layer
# This means any normally shifted keys do not need to be redefined here
#
# Modified Date
Date = 2016-08-06;
### Mapping ###
# Top Row
U"BackTick" : '~';
U"1" : '%';
U"2" : U"7" + blockKey( 0xE1 ) + blockKey( 0xE5 );
U"3" : U"5" + blockKey( 0xE1 ) + blockKey( 0xE5 );
U"4" : U"3" + blockKey( 0xE1 ) + blockKey( 0xE5 );
U"5" : U"1" + blockKey( 0xE1 ) + blockKey( 0xE5 );
U"6" : U"9" + blockKey( 0xE1 ) + blockKey( 0xE5 );
U"7" : U"0" + blockKey( 0xE1 ) + blockKey( 0xE5 );
U"8" : U"2" + blockKey( 0xE1 ) + blockKey( 0xE5 );
U"9" : U"4" + blockKey( 0xE1 ) + blockKey( 0xE5 );
U"0" : U"6" + blockKey( 0xE1 ) + blockKey( 0xE5 );
U"-" : U"8" + blockKey( 0xE1 ) + blockKey( 0xE5 );
U"=" : U"Backtick" + blockKey( 0xE1 ) + blockKey( 0xE5 );
# Top-Middle Row
U"]" : '^';
# Middle Row
# N/A
# Bottom-Middle Row
# N/A
# Bottom Row
# N/A
# kll/layouts/remote_reload.kll
Name = RemoteReload;
Version = 0.1;
Author = "HaaTa (Jacob Alexander) 2015";
KLL = 0.3c;
# Modified Date
Date = 2015-09-01;
# Bootloader Remote Reload Enable
flashModeEnabled = 1;
# kll/layouts/stdFuncMap.kll
Name = stdFuncMap;
Version = 0.3;
Author = "HaaTa (Jacob Alexander) 2014-2015";
KLL = 0.3c;
# Modified Date
Date = 2015-09-29;
# Maps each Function key incrementally to each layer
# Unused layers and functions are ignored
U"Function1" :: layerShift( 1 );
U"Function2" :: layerShift( 2 );
U"Function3" :: layerShift( 3 );
U"Function4" :: layerShift( 4 );
U"Function5" :: layerShift( 5 );
U"Function6" :: layerShift( 6 );
U"Function7" :: layerShift( 7 );
U"Function8" :: layerShift( 8 );
U"Function9" :: layerShift( 9 );
U"Function10" :: layerShift( 10 );
U"Function11" :: layerShift( 11 );
U"Function12" :: layerShift( 12 );
U"Function13" :: layerShift( 13 );
U"Function14" :: layerShift( 14 );
U"Function15" :: layerShift( 15 );
U"Function16" :: layerShift( 16 );
U"Lock1" :: layerLock( 1 );
U"Lock2" :: layerLock( 2 );
U"Lock3" :: layerLock( 3 );
U"Lock4" :: layerLock( 4 );
U"Lock5" :: layerLock( 5 );
U"Lock6" :: layerLock( 6 );
U"Lock7" :: layerLock( 7 );
U"Lock8" :: layerLock( 8 );
U"Lock9" :: layerLock( 9 );
U"Lock10" :: layerLock( 10 );
U"Lock11" :: layerLock( 11 );
U"Lock12" :: layerLock( 12 );
U"Lock13" :: layerLock( 13 );
U"Lock14" :: layerLock( 14 );
U"Lock15" :: layerLock( 15 );
U"Lock16" :: layerLock( 16 );
U"Latch1" :: layerLatch( 1 );
U"Latch2" :: layerLatch( 2 );
U"Latch3" :: layerLatch( 3 );
U"Latch4" :: layerLatch( 4 );
U"Latch5" :: layerLatch( 5 );
U"Latch6" :: layerLatch( 6 );
U"Latch7" :: layerLatch( 7 );
U"Latch8" :: layerLatch( 8 );
U"Latch9" :: layerLatch( 9 );
U"Latch10" :: layerLatch( 10 );
U"Latch11" :: layerLatch( 11 );
U"Latch12" :: layerLatch( 12 );
U"Latch13" :: layerLatch( 13 );
U"Latch14" :: layerLatch( 14 );
U"Latch15" :: layerLatch( 15 );
U"Latch16" :: layerLatch( 16 );
# Layer rotation
U"Next Layer" :: layerRotate( 0 ); # 0 is Next
U"Prev Layer" :: layerRotate( 1 ); # 1 is Previous
# kll/layouts/tab_function.kll
Name = tab_function;
Version = 0.1;
Author = "HaaTa (Jacob Alexander) 2017";
KLL = 0.5c;
# Modified Date
Date = 2017-09-24;
U"Tab" : U"Function1";
# kll/layouts/usb_debug.kll
# USB Debug Settings
# These options limit the keyboard to only the keyboard descriptor.
# This means that many of the features will be disabled.
# However, it does help greatly when debugging USB issues.
Name = USBDebugSettings;
Version = 0.1;
Author = "HaaTa (Jacob Alexander) 2018";
KLL = 0.5;
# Modified Date
Date = 2018-04-06;
## Settings ##
enableJoystick = 0;
enableMouse = 0;
enableVirtualSerialPort = 0;
enableRawIO = 0;
# kll/layouts/whitefox.kll
Name = whitefox;
Version = 0.1;
Author = "Matt3o and HaaTa (Jacob Alexander) 2015";
KLL = 0.3c;
# Modified Date
Date = 2015-08-16;
# Number Row
U"1" : U"F1";
U"2" : U"F2";
U"3" : U"F3";
U"4" : U"F4";
U"5" : U"F5";
U"6" : U"F6";
U"7" : U"F7";
U"8" : U"F8";
U"9" : U"F9";
U"0" : U"F10";
U"-" : U"F11";
U"=" : U"F12";
U"Backspace" : U"Delete";
U"Backtick" : U"PrintScreen";
# Top Row
U"Delete" : CONS"Mute";
# Middle Row
U"PageUp" : CONS"VolumeUp";
# Bottom Row
U"Up" : U"PageUp";
U"PageDown" : CONS"VolumeDown";
# Space Row
U"Gui" : U"Menu";
U"Left" : U"Home";
U"Down" : U"PageDown";
U"Right" : U"End";
# kll/layouts/ic60/hhkbpro2.kll
Name = hhkbpro2;
Version = 0.2;
Author = "HaaTa (Jacob Alexander) 2014";
KLL = 0.3c;
# Modified Date
Date = 2015-10-12;
# Number Row
U"1" : U"F1";
U"2" : U"F2";
U"3" : U"F3";
U"4" : U"F4";
U"5" : U"F5";
U"6" : U"F6";
U"7" : U"F7";
U"8" : U"F8";
U"9" : U"F9";
U"0" : U"F10";
U"-" : U"F11";
U"=" : U"F12";
U"Backslash" : U"Insert";
U"Backtick" : U"Delete";
# Top Row
U"Tab" : U"Capslock";
U"W" : flashMode();
U"I" : U"PrintScreen";
U"O" : U"ScrollLock";
U"P" : U"Pause";
U"[" : U"Up";
# Middle Row
U"A" : CONS"VolumeDown";
U"S" : CONS"VolumeUp";
U"D" : CONS"Mute";
U"F" : CONS"Eject";
U"H" : U"Keypad Asterisk";
U"J" : U"Keypad Slash";
U"K" : U"Home";
U"L" : U"PageUp";
U";" : U"Left";
U"Quote" : U"Right";
# Bottom Row
U"N" : U"Keypad Plus";
U"M" : U"Keypad Minus";
U"Comma" : U"End";
U"Period" : U"PageDown";
U"Slash" : U"Down";
# Space Row
# N/A
# kll/layouts/ic60/md1Action.kll
Name = md1Overlay;
Version = 0.1;
Author = "HaaTa (Jacob Alexander) 2015";
KLL = 0.3c;
# Modified Date
Date = 2015-08-16;
# Example of blocking ESC (0x29) when holding either Shift key
# Currently, kll only supports numbers (this may be fixed in a future kll spec)
U["LShift", "RShift"] : blockHold( 0x29 );
U"Esc" : blockKey( 0x29 );
# Now that Esc is blocked, it's possible to use this macro
U["LShift", "RShift"] + U"Esc" : '~';
U"Function2" : layerLock( 1 );
U"Function3" : action1();
U"Function4" : U"CapsLock";
# kll/layouts/ic60/md1Overlay.kll
Name = md1Overlay;
Version = 0.1;
Author = "HaaTa (Jacob Alexander) 2014";
KLL = 0.3;
# Modified Date
Date = 2014-11-12;
U"Function2" : layerLock( 1 );
U"Function3" : layerLatch( 1 );
U"Function4" : U"CapsLock";
# kll/layouts/ic60_led/all-leds.kll
# Turn all Infinity 60% LEDs on by default
Name = all-leds;
Version = 0.1;
Author = "HaaTa (Jacob Alexander) 2017";
KLL = 0.5;
# Modified Date
Date = 2017-10-31;
### Animations ###
## Usage ##
A[all_on] <= start, pfunc:interp;
# All LEDs on
A[all_on, 1] <= P[c:0%](255), P[c:100%](255);
# kll/layouts/infinity_ergodox/all-leds.kll
# Turn all Infinity Ergodox LEDs on by default
Name = all-leds;
Version = 0.1;
Author = "HaaTa (Jacob Alexander) 2017";
KLL = 0.5;
# Modified Date
Date = 2017-10-31;
### Animations ###
## Usage ##
A[all_on] <= start, pfunc:interp;
# All LEDs on
A[all_on, 1] <= P[c:0%](255), P[c:100%](255);
# kll/layouts/infinity_ergodox/iced_func.kll
Name = ICED Function Layer;
Version = 0.1;
Author = "HaaTa (Jacob Alexander) 2015";
KLL = 0.3c;
# Modified Date
Date = 2015-08-21;
# Top Row - Left
U"1" : U"F1";
U"2" : U"F2";
U"3" : U"F3";
U"4" : U"F4";
U"5" : U"F5";
U"Esc" : U"F11";
# Top Row - Right
U"Function6" : U"F12";
U"6" : U"F6";
U"7" : U"F7";
U"8" : U"F8";
U"9" : U"F9";
U"0" : U"F10";
# kll/layouts/infinity_ergodox/iced_numpad.kll
Name = ICED Numpad Layer;
Version = 0.1;
Author = "HaaTa (Jacob Alexander) 2015";
KLL = 0.3c;
# Modified Date
Date = 2015-08-21;
# Top Row - Left
U"Equals" : flashMode();
# Top Row - Right
U"7" : U"Numlock";
U"8" : U"Keypad Slash";
U"9" : U"Keypad Asterisk";
U"0" : U"Keypad Minus";
# Top-Middle Row - Right
U"U" : U"Keypad 7";
U"I" : U"Keypad 8";
U"O" : U"Keypad 9";
U"P" : U"Keypad Plus";
# Middle Row - Right
U"J" : U"Keypad 4";
U"K" : U"Keypad 5";
U"L" : U"Keypad 6";
U"Semicolon" : U"Keypad Plus";
# Bottom-Middle Row - Right
U"M" : U"Keypad 1";
U"Comma" : U"Keypad 2";
U"Period" : U"Keypad 3";
U"Slash" : U"Keypad Enter";
# Bottom Row - Right
U"Up" : U"Keypad Period";
U"Right" : U"Keypad Enter";
# Bottom Thumb Cluster - Right
U"Space" : U"Keypad 0";
# kll/layouts/infinity_ergodox/lcdFuncMap.kll
Name = lcdFuncMap;
Version = 0.3;
Author = "HaaTa (Jacob Alexander) 2015";
KLL = 0.3c;
# Modified Date
Date = 2015-09-29;
# Maps each Function key incrementally to each layer
# Unused layers and functions are ignored
U"Function1" :: layerShift( 1 ) + LCDLayerDisplay();
U"Function2" :: layerShift( 2 ) + LCDLayerDisplay();
U"Function3" :: layerShift( 3 ) + LCDLayerDisplay();
U"Function4" :: layerShift( 4 ) + LCDLayerDisplay();
U"Function5" :: layerShift( 5 ) + LCDLayerDisplay();
U"Function6" :: layerShift( 6 ) + LCDLayerDisplay();
U"Function7" :: layerShift( 7 ) + LCDLayerDisplay();
U"Function8" :: layerShift( 8 ) + LCDLayerDisplay();
U"Function9" :: layerShift( 9 ) + LCDLayerDisplay();
U"Function10" :: layerShift( 10 ) + LCDLayerDisplay();
U"Function11" :: layerShift( 11 ) + LCDLayerDisplay();
U"Function12" :: layerShift( 12 ) + LCDLayerDisplay();
U"Function13" :: layerShift( 13 ) + LCDLayerDisplay();
U"Function14" :: layerShift( 14 ) + LCDLayerDisplay();
U"Function15" :: layerShift( 15 ) + LCDLayerDisplay();
U"Function16" :: layerShift( 16 ) + LCDLayerDisplay();
U"Lock1" :: layerLock( 1 ) + LCDLayerDisplay();
U"Lock2" :: layerLock( 2 ) + LCDLayerDisplay();
U"Lock3" :: layerLock( 3 ) + LCDLayerDisplay();
U"Lock4" :: layerLock( 4 ) + LCDLayerDisplay();
U"Lock5" :: layerLock( 5 ) + LCDLayerDisplay();
U"Lock6" :: layerLock( 6 ) + LCDLayerDisplay();
U"Lock7" :: layerLock( 7 ) + LCDLayerDisplay();
U"Lock8" :: layerLock( 8 ) + LCDLayerDisplay();
U"Lock9" :: layerLock( 9 ) + LCDLayerDisplay();
U"Lock10" :: layerLock( 10 ) + LCDLayerDisplay();
U"Lock11" :: layerLock( 11 ) + LCDLayerDisplay();
U"Lock12" :: layerLock( 12 ) + LCDLayerDisplay();
U"Lock13" :: layerLock( 13 ) + LCDLayerDisplay();
U"Lock14" :: layerLock( 14 ) + LCDLayerDisplay();
U"Lock15" :: layerLock( 15 ) + LCDLayerDisplay();
U"Lock16" :: layerLock( 16 ) + LCDLayerDisplay();
U"Latch1" :: layerLatch( 1 ) + LCDLayerDisplay();
U"Latch2" :: layerLatch( 2 ) + LCDLayerDisplay();
U"Latch3" :: layerLatch( 3 ) + LCDLayerDisplay();
U"Latch4" :: layerLatch( 4 ) + LCDLayerDisplay();
U"Latch5" :: layerLatch( 5 ) + LCDLayerDisplay();
U"Latch6" :: layerLatch( 6 ) + LCDLayerDisplay();
U"Latch7" :: layerLatch( 7 ) + LCDLayerDisplay();
U"Latch8" :: layerLatch( 8 ) + LCDLayerDisplay();
U"Latch9" :: layerLatch( 9 ) + LCDLayerDisplay();
U"Latch10" :: layerLatch( 10 ) + LCDLayerDisplay();
U"Latch11" :: layerLatch( 11 ) + LCDLayerDisplay();
U"Latch12" :: layerLatch( 12 ) + LCDLayerDisplay();
U"Latch13" :: layerLatch( 13 ) + LCDLayerDisplay();
U"Latch14" :: layerLatch( 14 ) + LCDLayerDisplay();
U"Latch15" :: layerLatch( 15 ) + LCDLayerDisplay();
U"Latch16" :: layerLatch( 16 ) + LCDLayerDisplay();
# Layer rotation
U"Next Layer" :: layerRotate( 0 ) + LCDLayerDisplay(); # 0 is Next
U"Prev Layer" :: layerRotate( 1 ) + LCDLayerDisplay(); # 1 is Previous
# Colours assigned to each of the LCD numbers
# The "top of stack" layer is the colour used
STLcdNumber0Color => STLcdNumber0Color_define;
STLcdNumber1Color => STLcdNumber1Color_define;
STLcdNumber2Color => STLcdNumber2Color_define;
STLcdNumber3Color => STLcdNumber3Color_define;
STLcdNumber4Color => STLcdNumber4Color_define;
STLcdNumber5Color => STLcdNumber5Color_define;
STLcdNumber6Color => STLcdNumber6Color_define;
STLcdNumber7Color => STLcdNumber7Color_define;
STLcdNumber8Color => STLcdNumber8Color_define;
STLcdNumber9Color => STLcdNumber9Color_define;
# Brightness reduced to 75% and converted to 16-bit colour
# <8-bit>/0xFF * 0xFFFF * 0.75
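# Worked example, applying the formula above to STLcdNumber1Color (#E0342A):
#   red   0xE0 -> 0xE0/0xFF * 0xFFFF * 0.75 = 0xE0E0 * 0.75 = 0xA8A8
#   green 0x34 -> 0x3434 * 0.75 = 0x2727
#   blue  0x2A -> 0x2A2A * 0.75 = 0x1FA0 (rounded)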
STLcdNumber1Color = "0xA8A8, 0x2727, 0x1FA0"; # Taylor Swift's Lips #E0342A
STLcdNumber2Color = "0x4B4B, 0x8D8D, 0x34B5"; # Matrix Green #64BC46
STLcdNumber3Color = "0x0000, 0x8242, 0xB3F3"; # Smart Water Blue #00ADEF
STLcdNumber4Color = "0xF6F6, 0xA5A5, 0x4949"; # Fanta Orange #F6A549 (boosted to 100%)
STLcdNumber5Color = "0xB7B7, 0x5D5D, 0x8545"; # Nicki Minaj Pink #F47CB1
STLcdNumber6Color = "0xBCFC, 0xB6F6, 0x2D2D"; # Iowa Corn Yellow #FBDF3C
STLcdNumber7Color = "0x00C1, 0x7E7E, 0x3C3C"; # Sprite Green #01A850
STLcdNumber8Color = "0x8303, 0x1394, 0xB9F9"; # Fanta Purple #AE1AF7
STLcdNumber9Color = "0x09CA, 0x12D3, 0x8484"; # Red Bull Blue #0D19B0
STLcdNumber0Color = "0xB939, 0xAAEA, 0x8D8D"; # Butter Cow Off-white #F6E3BC
# Basic 32x32 numbers used to display layer stack
# Up to 4 of these numbers can be shown at a time on the LCD
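# Note: each glyph below is stored as 4 rows of 32 bytes; each row appears to be
# one 8-pixel-tall LCD page, giving a 32x32 pixel bitmap (assumed from the data
# layout, not stated in this file).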
STLcdNumber0 => STLcdNumber0_define;
STLcdNumber1 => STLcdNumber1_define;
STLcdNumber2 => STLcdNumber2_define;
STLcdNumber3 => STLcdNumber3_define;
STLcdNumber4 => STLcdNumber4_define;
STLcdNumber5 => STLcdNumber5_define;
STLcdNumber6 => STLcdNumber6_define;
STLcdNumber7 => STLcdNumber7_define;
STLcdNumber8 => STLcdNumber8_define;
STLcdNumber9 => STLcdNumber9_define;
STLcdNumber0 = "
0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00,
";
STLcdNumber1 = "
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
";
STLcdNumber2 = "
0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00,
";
STLcdNumber3 = "
0x00, 0x00, 0x00, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00,
";
STLcdNumber4 = "
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00,
";
STLcdNumber5 = "
0x00, 0x00, 0x00, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0x00, 0x00, 0x00,
";
STLcdNumber6 = "
0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0x00, 0x00, 0x00,
";
STLcdNumber7 = "
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00,
";
STLcdNumber8 = "
0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00,
";
STLcdNumber9 = "
0x00, 0x00, 0x00, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00,
";

### File: kll/layouts/infinity_ergodox/mdergo1Overlay.kll ###
Name = mdergo1Overlay;
Version = 0.2;
Author = "HaaTa (Jacob Alexander) 2015";
KLL = 0.3c;
# Modified Date
Date = 2015-09-20;
# Make default layout more like the original ErgoDox default layout
# https://keyboard-configurator.massdrop.com/ext/ergodox/
#
# The defaultMap.kll in BaseMap cannot have any duplicate keys (or they cannot be fully remapped later)
# But, the DefaultMap can start to duplicate keys without any issues
# Top Row - Right
U"Function6" : U"Lock2";
# Top-Middle Row - Left
U"Function1" : U"Lock1";
# Bottom-Middle Row - Left
U"Function2" : U"Function1"; # Set Function2 to be layer shift 1
# Bottom-Middle Row - Right
U"Function7" : U"Function1"; # Set Function7 to be layer shift 1
# Bottom Row - Left
U"Function3" : U"Backslash";
U"Function4" : U"Left";
U"Function5" : U"Right";

### File: kll/layouts/k-type/color_painter.kll ###
# Color Painter Example Configuration
Name = Color Painter Animation Example;
Version = 0.1;
Author = "HaaTa (Jacob Alexander) 2017";
KLL = 0.5;
# Modified Date
Date = 2017-02-08;

### File: kll/layouts/k-type/full_cross.kll ###
# Full Cross Animation
Name = "Full Cross Animation";
Version = 0.3;
Author = "HaaTa (Jacob Alexander) 2017";
KLL = 0.5;
# Modified Date
Date = 2017-10-08;
### Animations ###
## Usage Examples ##
# A[full_cross_white] <= replace:basic, loops:2, framedelay:1;
# U["P"] :+ A[full_cross_white](start);
#
# A[full_dark_cross] <= replace:basic;
# U["O"] :+ A[full_dark_cross](start);
# Full Cross White
A[full_cross_white, 1] <= P[r:i](+:10,+:10,+:10), P[c:i](+:10,+:10,+:10);
A[full_cross_white, 2] <= P[r:i](+:10,+:10,+:10), P[c:i](+:10,+:10,+:10);
A[full_cross_white, 3] <= P[r:i](+:10,+:10,+:10), P[c:i](+:10,+:10,+:10);
A[full_cross_white, 4] <= P[r:i](+:10,+:10,+:10), P[c:i](+:10,+:10,+:10);
A[full_cross_white, 5] <= P[r:i](+:10,+:10,+:10), P[c:i](+:10,+:10,+:10);
A[full_cross_white, 6] <= P[r:i](+:10,+:10,+:10), P[c:i](+:10,+:10,+:10);
A[full_cross_white, 7] <= P[r:i](+:10,+:10,+:10), P[c:i](+:10,+:10,+:10);
A[full_cross_white, 8] <= P[r:i](+:10,+:10,+:10), P[c:i](+:10,+:10,+:10);
A[full_cross_white, 9] <= P[r:i](+:10,+:10,+:10), P[c:i](+:10,+:10,+:10);
A[full_cross_white, 10] <= P[r:i](+:10,+:10,+:10), P[c:i](+:10,+:10,+:10);
A[full_cross_white, 11] <= P[r:i](-:10,-:10,-:10), P[c:i](-:10,-:10,-:10);
A[full_cross_white, 12] <= P[r:i](-:10,-:10,-:10), P[c:i](-:10,-:10,-:10);
A[full_cross_white, 13] <= P[r:i](-:10,-:10,-:10), P[c:i](-:10,-:10,-:10);
A[full_cross_white, 14] <= P[r:i](-:10,-:10,-:10), P[c:i](-:10,-:10,-:10);
A[full_cross_white, 15] <= P[r:i](-:10,-:10,-:10), P[c:i](-:10,-:10,-:10);
A[full_cross_white, 16] <= P[r:i](-:10,-:10,-:10), P[c:i](-:10,-:10,-:10);
A[full_cross_white, 17] <= P[r:i](-:10,-:10,-:10), P[c:i](-:10,-:10,-:10);
A[full_cross_white, 18] <= P[r:i](-:10,-:10,-:10), P[c:i](-:10,-:10,-:10);
A[full_cross_white, 19] <= P[r:i](-:10,-:10,-:10), P[c:i](-:10,-:10,-:10);
A[full_cross_white, 20] <= P[r:i](-:10,-:10,-:10), P[c:i](-:10,-:10,-:10);
# Full Dark Cross
# Works best with another animation already playing underneath
A[full_dark_cross, 1] <= P[r:i](-:255,-:255,-:255), P[c:i](-:255,-:255,-:255);
A[full_dark_cross, 2] <= P[r:i](-:225,-:225,-:225), P[c:i](-:225,-:225,-:225);
A[full_dark_cross, 3] <= P[r:i](-:200,-:200,-:200), P[c:i](-:200,-:200,-:200);
A[full_dark_cross, 4] <= P[r:i](-:175,-:175,-:175), P[c:i](-:175,-:175,-:175);
A[full_dark_cross, 5] <= P[r:i](-:150,-:150,-:150), P[c:i](-:150,-:150,-:150);
A[full_dark_cross, 6] <= P[r:i](-:125,-:125,-:125), P[c:i](-:125,-:125,-:125);
A[full_dark_cross, 7] <= P[r:i](-:100,-:100,-:100), P[c:i](-:100,-:100,-:100);
A[full_dark_cross, 8] <= P[r:i](-:75,-:75,-:75), P[c:i](-:75,-:75,-:75);
A[full_dark_cross, 9] <= P[r:i](-:50,-:50,-:50), P[c:i](-:50,-:50,-:50);
A[full_dark_cross, 10] <= P[r:i](-:25,-:25,-:25), P[c:i](-:25,-:25,-:25);

### File: kll/layouts/k-type/miami.kll ###
# Miami Animation
Name = "Miami Animation";
Version = 0.4;
Author = "HaaTa (Jacob Alexander) 2017";
KLL = 0.5;
# Modified Date
Date = 2017-10-08;
### Animations ###
## Usage Examples ##
# A[rainbow_wave] <= start, framedelay:3, framestretch, loop, replace:all, pfunc:interp;
# Rainbow wave
A[rainbow_wave, 1] <= P[c:0%] (0,255,0), P[c:25%](255,255,0), P[c:50%](255,255,255), P[c:75%](127,0,255), P[c:100%](0,0,255);
A[rainbow_wave, 2] <= P[c:-24%](0,0,255), P[c:2%] (0,255,0), P[c:27%](255,255,0), P[c:52%](255,255,255), P[c:77%](127,0,255), P[c:102%](0,0,255);
A[rainbow_wave, 3] <= P[c:-22%](0,0,255), P[c:4%] (0,255,0), P[c:29%](255,255,0), P[c:54%](255,255,255), P[c:79%](127,0,255), P[c:104%](0,0,255);
A[rainbow_wave, 4] <= P[c:-20%](0,0,255), P[c:6%] (0,255,0), P[c:31%](255,255,0), P[c:56%](255,255,255), P[c:81%](127,0,255), P[c:106%](0,0,255);
A[rainbow_wave, 5] <= P[c:-18%](0,0,255), P[c:8%] (0,255,0), P[c:33%](255,255,0), P[c:58%](255,255,255), P[c:83%](127,0,255), P[c:108%](0,0,255);
A[rainbow_wave, 6] <= P[c:-16%](0,0,255), P[c:10%](0,255,0), P[c:35%](255,255,0), P[c:60%](255,255,255), P[c:85%](127,0,255), P[c:110%](0,0,255);
A[rainbow_wave, 7] <= P[c:-14%](0,0,255), P[c:12%](0,255,0), P[c:37%](255,255,0), P[c:62%](255,255,255), P[c:87%](127,0,255), P[c:112%](0,0,255);
A[rainbow_wave, 8] <= P[c:-12%](0,0,255), P[c:14%](0,255,0), P[c:39%](255,255,0), P[c:64%](255,255,255), P[c:89%](127,0,255), P[c:114%](0,0,255);
A[rainbow_wave, 9] <= P[c:-10%](0,0,255), P[c:16%](0,255,0), P[c:41%](255,255,0), P[c:66%](255,255,255), P[c:91%](127,0,255), P[c:116%](0,0,255);
A[rainbow_wave, 10] <= P[c:-8%] (0,0,255), P[c:18%](0,255,0), P[c:43%](255,255,0), P[c:68%](255,255,255), P[c:93%](127,0,255), P[c:118%](0,0,255);
A[rainbow_wave, 11] <= P[c:-6%] (0,0,255), P[c:20%](0,255,0), P[c:45%](255,255,0), P[c:70%](255,255,255), P[c:95%](127,0,255), P[c:120%](0,0,255);
A[rainbow_wave, 12] <= P[c:-4%] (0,0,255), P[c:22%](0,255,0), P[c:47%](255,255,0), P[c:72%](255,255,255), P[c:97%](127,0,255), P[c:122%](0,0,255);
A[rainbow_wave, 13] <= P[c:-2%] (0,0,255), P[c:24%](0,255,0), P[c:49%](255,255,0), P[c:74%](255,255,255), P[c:99%](127,0,255), P[c:124%](0,0,255);
A[rainbow_wave, 14] <= P[c:0%] (0,0,255), P[c:25%](0,255,0), P[c:50%](255,255,0), P[c:75%](255,255,255), P[c:100%](127,0,255);
A[rainbow_wave, 15] <= P[c:-24%](127,0,255), P[c:2%] (0,0,255), P[c:27%](0,255,0), P[c:52%](255,255,0), P[c:77%](255,255,255), P[c:102%](127,0,255);
A[rainbow_wave, 16] <= P[c:-22%](127,0,255), P[c:4%] (0,0,255), P[c:29%](0,255,0), P[c:54%](255,255,0), P[c:79%](255,255,255), P[c:104%](127,0,255);
A[rainbow_wave, 17] <= P[c:-20%](127,0,255), P[c:6%] (0,0,255), P[c:31%](0,255,0), P[c:56%](255,255,0), P[c:81%](255,255,255), P[c:106%](127,0,255);
A[rainbow_wave, 18] <= P[c:-18%](127,0,255), P[c:8%] (0,0,255), P[c:33%](0,255,0), P[c:58%](255,255,0), P[c:83%](255,255,255), P[c:108%](127,0,255);
A[rainbow_wave, 19] <= P[c:-16%](127,0,255), P[c:10%](0,0,255), P[c:35%](0,255,0), P[c:60%](255,255,0), P[c:85%](255,255,255), P[c:110%](127,0,255);
A[rainbow_wave, 20] <= P[c:-14%](127,0,255), P[c:12%](0,0,255), P[c:37%](0,255,0), P[c:62%](255,255,0), P[c:87%](255,255,255), P[c:112%](127,0,255);
A[rainbow_wave, 21] <= P[c:-12%](127,0,255), P[c:14%](0,0,255), P[c:39%](0,255,0), P[c:64%](255,255,0), P[c:89%](255,255,255), P[c:114%](127,0,255);
A[rainbow_wave, 22] <= P[c:-10%](127,0,255), P[c:16%](0,0,255), P[c:41%](0,255,0), P[c:66%](255,255,0), P[c:91%](255,255,255), P[c:116%](127,0,255);
A[rainbow_wave, 23] <= P[c:-8%] (127,0,255), P[c:18%](0,0,255), P[c:43%](0,255,0), P[c:68%](255,255,0), P[c:93%](255,255,255), P[c:118%](127,0,255);
A[rainbow_wave, 24] <= P[c:-6%] (127,0,255), P[c:20%](0,0,255), P[c:45%](0,255,0), P[c:70%](255,255,0), P[c:95%](255,255,255), P[c:120%](127,0,255);
A[rainbow_wave, 25] <= P[c:-4%] (127,0,255), P[c:22%](0,0,255), P[c:47%](0,255,0), P[c:72%](255,255,0), P[c:97%](255,255,255), P[c:122%](127,0,255);
A[rainbow_wave, 26] <= P[c:-2%] (127,0,255), P[c:24%](0,0,255), P[c:49%](0,255,0), P[c:74%](255,255,0), P[c:99%](255,255,255), P[c:124%](127,0,255);
A[rainbow_wave, 28] <= P[c:0%] (127,0,255), P[c:25%](0,0,255), P[c:50%](0,255,0), P[c:75%](255,255,0), P[c:100%](255,255,255);
A[rainbow_wave, 29] <= P[c:-24%](255,255,255), P[c:2%] (127,0,255), P[c:27%](0,0,255), P[c:52%](0,255,0), P[c:77%](255,255,0), P[c:102%](255,255,255);
A[rainbow_wave, 30] <= P[c:-22%](255,255,255), P[c:4%] (127,0,255), P[c:29%](0,0,255), P[c:54%](0,255,0), P[c:79%](255,255,0), P[c:104%](255,255,255);
A[rainbow_wave, 31] <= P[c:-20%](255,255,255), P[c:6%] (127,0,255), P[c:31%](0,0,255), P[c:56%](0,255,0), P[c:81%](255,255,0), P[c:106%](255,255,255);
A[rainbow_wave, 32] <= P[c:-18%](255,255,255), P[c:8%] (127,0,255), P[c:33%](0,0,255), P[c:58%](0,255,0), P[c:83%](255,255,0), P[c:108%](255,255,255);
A[rainbow_wave, 33] <= P[c:-16%](255,255,255), P[c:10%](127,0,255), P[c:35%](0,0,255), P[c:60%](0,255,0), P[c:85%](255,255,0), P[c:110%](255,255,255);
A[rainbow_wave, 34] <= P[c:-14%](255,255,255), P[c:12%](127,0,255), P[c:37%](0,0,255), P[c:62%](0,255,0), P[c:87%](255,255,0), P[c:112%](255,255,255);
A[rainbow_wave, 35] <= P[c:-12%](255,255,255), P[c:14%](127,0,255), P[c:39%](0,0,255), P[c:64%](0,255,0), P[c:89%](255,255,0), P[c:114%](255,255,255);
A[rainbow_wave, 36] <= P[c:-10%](255,255,255), P[c:16%](127,0,255), P[c:41%](0,0,255), P[c:66%](0,255,0), P[c:91%](255,255,0), P[c:116%](255,255,255);
A[rainbow_wave, 37] <= P[c:-8%] (255,255,255), P[c:18%](127,0,255), P[c:43%](0,0,255), P[c:68%](0,255,0), P[c:93%](255,255,0), P[c:118%](255,255,255);
A[rainbow_wave, 38] <= P[c:-6%] (255,255,255), P[c:20%](127,0,255), P[c:45%](0,0,255), P[c:70%](0,255,0), P[c:95%](255,255,0), P[c:120%](255,255,255);
A[rainbow_wave, 39] <= P[c:-4%] (255,255,255), P[c:22%](127,0,255), P[c:47%](0,0,255), P[c:72%](0,255,0), P[c:97%](255,255,0), P[c:122%](255,255,255);
A[rainbow_wave, 40] <= P[c:-2%] (255,255,255), P[c:24%](127,0,255), P[c:49%](0,0,255), P[c:74%](0,255,0), P[c:99%](255,255,0), P[c:124%](255,255,255);
A[rainbow_wave, 41] <= P[c:0%] (255,255,255), P[c:25%](127,0,255), P[c:50%](0,0,255), P[c:75%](0,255,0), P[c:100%](255,255,0);
A[rainbow_wave, 42] <= P[c:-24%](255,255,0), P[c:2%] (255,255,255), P[c:27%](127,0,255), P[c:52%](0,0,255), P[c:77%](0,255,0), P[c:102%](255,255,0);
A[rainbow_wave, 43] <= P[c:-22%](255,255,0), P[c:4%] (255,255,255), P[c:29%](127,0,255), P[c:54%](0,0,255), P[c:79%](0,255,0), P[c:104%](255,255,0);
A[rainbow_wave, 44] <= P[c:-20%](255,255,0), P[c:6%] (255,255,255), P[c:31%](127,0,255), P[c:56%](0,0,255), P[c:81%](0,255,0), P[c:106%](255,255,0);
A[rainbow_wave, 45] <= P[c:-18%](255,255,0), P[c:8%] (255,255,255), P[c:33%](127,0,255), P[c:58%](0,0,255), P[c:83%](0,255,0), P[c:108%](255,255,0);
A[rainbow_wave, 46] <= P[c:-16%](255,255,0), P[c:10%](255,255,255), P[c:35%](127,0,255), P[c:60%](0,0,255), P[c:85%](0,255,0), P[c:110%](255,255,0);
A[rainbow_wave, 47] <= P[c:-14%](255,255,0), P[c:12%](255,255,255), P[c:37%](127,0,255), P[c:62%](0,0,255), P[c:87%](0,255,0), P[c:112%](255,255,0);
A[rainbow_wave, 48] <= P[c:-12%](255,255,0), P[c:14%](255,255,255), P[c:39%](127,0,255), P[c:64%](0,0,255), P[c:89%](0,255,0), P[c:114%](255,255,0);
A[rainbow_wave, 49] <= P[c:-10%](255,255,0), P[c:16%](255,255,255), P[c:41%](127,0,255), P[c:66%](0,0,255), P[c:91%](0,255,0), P[c:116%](255,255,0);
A[rainbow_wave, 50] <= P[c:-8%] (255,255,0), P[c:18%](255,255,255), P[c:43%](127,0,255), P[c:68%](0,0,255), P[c:93%](0,255,0), P[c:118%](255,255,0);
A[rainbow_wave, 51] <= P[c:-6%] (255,255,0), P[c:20%](255,255,255), P[c:45%](127,0,255), P[c:70%](0,0,255), P[c:95%](0,255,0), P[c:120%](255,255,0);
A[rainbow_wave, 52] <= P[c:-4%] (255,255,0), P[c:22%](255,255,255), P[c:47%](127,0,255), P[c:72%](0,0,255), P[c:97%](0,255,0), P[c:122%](255,255,0);
A[rainbow_wave, 53] <= P[c:-2%] (255,255,0), P[c:24%](255,255,255), P[c:49%](127,0,255), P[c:74%](0,0,255), P[c:99%](0,255,0), P[c:124%](255,255,0);
A[rainbow_wave, 54] <= P[c:0%] (255,255,0), P[c:25%](255,255,255), P[c:50%](127,0,255), P[c:75%](0,0,255), P[c:100%](0,255,0);
A[rainbow_wave, 55] <= P[c:-24%](0,255,0), P[c:2%] (255,255,0), P[c:27%](255,255,255), P[c:52%](127,0,255), P[c:77%](0,0,255), P[c:102%](0,255,0);
A[rainbow_wave, 56] <= P[c:-22%](0,255,0), P[c:4%] (255,255,0), P[c:29%](255,255,255), P[c:54%](127,0,255), P[c:79%](0,0,255), P[c:104%](0,255,0);
A[rainbow_wave, 57] <= P[c:-20%](0,255,0), P[c:6%] (255,255,0), P[c:31%](255,255,255), P[c:56%](127,0,255), P[c:81%](0,0,255), P[c:106%](0,255,0);
A[rainbow_wave, 58] <= P[c:-18%](0,255,0), P[c:8%] (255,255,0), P[c:33%](255,255,255), P[c:58%](127,0,255), P[c:83%](0,0,255), P[c:108%](0,255,0);
A[rainbow_wave, 59] <= P[c:-16%](0,255,0), P[c:10%](255,255,0), P[c:35%](255,255,255), P[c:60%](127,0,255), P[c:85%](0,0,255), P[c:110%](0,255,0);
A[rainbow_wave, 60] <= P[c:-14%](0,255,0), P[c:12%](255,255,0), P[c:37%](255,255,255), P[c:62%](127,0,255), P[c:87%](0,0,255), P[c:112%](0,255,0);
A[rainbow_wave, 61] <= P[c:-12%](0,255,0), P[c:14%](255,255,0), P[c:39%](255,255,255), P[c:64%](127,0,255), P[c:89%](0,0,255), P[c:114%](0,255,0);
A[rainbow_wave, 62] <= P[c:-10%](0,255,0), P[c:16%](255,255,0), P[c:41%](255,255,255), P[c:66%](127,0,255), P[c:91%](0,0,255), P[c:116%](0,255,0);
A[rainbow_wave, 63] <= P[c:-8%] (0,255,0), P[c:18%](255,255,0), P[c:43%](255,255,255), P[c:68%](127,0,255), P[c:93%](0,0,255), P[c:118%](0,255,0);
A[rainbow_wave, 64] <= P[c:-6%] (0,255,0), P[c:20%](255,255,0), P[c:45%](255,255,255), P[c:70%](127,0,255), P[c:95%](0,0,255), P[c:120%](0,255,0);
A[rainbow_wave, 65] <= P[c:-4%] (0,255,0), P[c:22%](255,255,0), P[c:47%](255,255,255), P[c:72%](127,0,255), P[c:97%](0,0,255), P[c:122%](0,255,0);
A[rainbow_wave, 66] <= P[c:-2%] (0,255,0), P[c:24%](255,255,0), P[c:49%](255,255,255), P[c:74%](127,0,255), P[c:99%](0,0,255), P[c:124%](0,255,0);

### File: kll/layouts/k-type/rainbow_wipe.kll ###
# Rainbow Wipe Animation
Name = Rainbow Wipe Animation;
Version = 0.2;
Author = "HaaTa (Jacob Alexander) 2017";
KLL = 0.5;
# Modified Date
Date = 2017-10-08;
### Animations ###
## Usage Examples ##
# U["A"-"Z"] :+ A[rainbow_wipe](loops:1);
# U["Print"] :+ A[rainbow_wipe]();
# U["Pause"] :+ A[rainbow_wipe](framedelay:0, loops:1, replace:all);
# Rainbow Wipe Animation
A[rainbow_wipe] <= loop, framestretch, framedelay:1, replace:basic, pfunc:interp;
A[rainbow_wipe, 1] <= P[c:0%](255,0,0), P[c:25%](255,255,0), P[c:50%](0,255,0), P[c:75%](0,0,255), P[c:100%](127,0,255);
#A[rainbow_wipe, 1] <= P[c:0%](255,0,0), P[c:25%](255,255,0), P[c:50%](0,255,0), P[c:75%](0,0,255);
A[rainbow_wipe, 2] <= P[c:0%](255,0,0), P[c:27%](255,255,0), P[c:52%](0,255,0), P[c:77%](0,0,255), P[c:100%](127,0,255);
#A[rainbow_wipe, 2] <= P[c:0%](255,0,0), P[c:27%](255,255,0), P[c:52%](0,255,0), P[c:77%](0,0,255);
A[rainbow_wipe, 3] <= P[c:0%](255,0,0), P[c:29%](255,255,0), P[c:54%](0,255,0), P[c:79%](0,0,255), P[c:100%](127,0,255);
#A[rainbow_wipe, 3] <= P[c:0%](255,0,0), P[c:29%](255,255,0), P[c:54%](0,255,0), P[c:79%](0,0,255);
A[rainbow_wipe, 4] <= P[c:0%](255,0,0), P[c:31%](255,255,0), P[c:56%](0,255,0), P[c:81%](0,0,255), P[c:100%](127,0,255);
#A[rainbow_wipe, 4] <= P[c:0%](255,0,0), P[c:31%](255,255,0), P[c:56%](0,255,0), P[c:81%](0,0,255);
#A[rainbow_wipe, 4] <= P[c:56%](0,255,0), P[c:81%](0,0,255), P[c:100%](127,0,255);
A[rainbow_wipe, 5] <= P[c:0%](255,0,0), P[c:33%](255,255,0), P[c:58%](0,255,0), P[c:83%](0,0,255), P[c:100%](127,0,255);
A[rainbow_wipe, 6] <= P[c:0%](255,0,0), P[c:35%](255,255,0), P[c:60%](0,255,0), P[c:85%](0,0,255), P[c:100%](127,0,255);
A[rainbow_wipe, 7] <= P[c:0%](255,0,0), P[c:37%](255,255,0), P[c:62%](0,255,0), P[c:87%](0,0,255), P[c:100%](127,0,255);
A[rainbow_wipe, 8] <= P[c:0%](255,0,0), P[c:39%](255,255,0), P[c:64%](0,255,0), P[c:89%](0,0,255), P[c:100%](127,0,255);
A[rainbow_wipe, 9] <= P[c:0%](255,0,0), P[c:41%](255,255,0), P[c:66%](0,255,0), P[c:91%](0,0,255), P[c:100%](127,0,255);
A[rainbow_wipe, 10] <= P[c:0%](255,0,0), P[c:43%](255,255,0), P[c:68%](0,255,0), P[c:93%](0,0,255), P[c:100%](127,0,255);
A[rainbow_wipe, 11] <= P[c:0%](255,0,0), P[c:45%](255,255,0), P[c:70%](0,255,0), P[c:95%](0,0,255), P[c:100%](127,0,255);
A[rainbow_wipe, 12] <= P[c:0%](255,0,0), P[c:47%](255,255,0), P[c:72%](0,255,0), P[c:97%](0,0,255), P[c:100%](127,0,255);
A[rainbow_wipe, 13] <= P[c:0%](255,0,0), P[c:45%](255,255,0), P[c:70%](0,255,0), P[c:95%](0,0,255), P[c:100%](127,0,255);
A[rainbow_wipe, 14] <= P[c:0%](255,0,0), P[c:43%](255,255,0), P[c:68%](0,255,0), P[c:93%](0,0,255), P[c:100%](127,0,255);
A[rainbow_wipe, 15] <= P[c:0%](255,0,0), P[c:41%](255,255,0), P[c:66%](0,255,0), P[c:91%](0,0,255), P[c:100%](127,0,255);
A[rainbow_wipe, 16] <= P[c:0%](255,0,0), P[c:39%](255,255,0), P[c:64%](0,255,0), P[c:89%](0,0,255), P[c:100%](127,0,255);
A[rainbow_wipe, 17] <= P[c:0%](255,0,0), P[c:37%](255,255,0), P[c:62%](0,255,0), P[c:87%](0,0,255), P[c:100%](127,0,255);
A[rainbow_wipe, 18] <= P[c:0%](255,0,0), P[c:35%](255,255,0), P[c:60%](0,255,0), P[c:85%](0,0,255), P[c:100%](127,0,255);
A[rainbow_wipe, 19] <= P[c:0%](255,0,0), P[c:33%](255,255,0), P[c:58%](0,255,0), P[c:83%](0,0,255), P[c:100%](127,0,255);
A[rainbow_wipe, 20] <= P[c:0%](255,0,0), P[c:31%](255,255,0), P[c:56%](0,255,0), P[c:81%](0,0,255), P[c:100%](127,0,255);
A[rainbow_wipe, 21] <= P[c:0%](255,0,0), P[c:29%](255,255,0), P[c:54%](0,255,0), P[c:79%](0,0,255), P[c:100%](127,0,255);
A[rainbow_wipe, 22] <= P[c:0%](255,0,0), P[c:27%](255,255,0), P[c:52%](0,255,0), P[c:77%](0,0,255), P[c:100%](127,0,255);
A[rainbow_wipe, 23] <= P[c:0%](255,0,0), P[c:25%](255,255,0), P[c:50%](0,255,0), P[c:75%](0,0,255), P[c:100%](127,0,255);
A[rainbow_wipe, 24] <= P[c:0%](255,0,0), P[c:23%](255,255,0), P[c:47%](0,255,0), P[c:73%](0,0,255), P[c:100%](127,0,255);
A[rainbow_wipe, 25] <= P[c:0%](255,0,0), P[c:21%](255,255,0), P[c:44%](0,255,0), P[c:71%](0,0,255), P[c:100%](127,0,255);
A[rainbow_wipe, 26] <= P[c:0%](255,0,0), P[c:19%](255,255,0), P[c:41%](0,255,0), P[c:69%](0,0,255), P[c:100%](127,0,255);
A[rainbow_wipe, 27] <= P[c:0%](255,0,0), P[c:17%](255,255,0), P[c:38%](0,255,0), P[c:67%](0,0,255), P[c:100%](127,0,255);
A[rainbow_wipe, 28] <= P[c:0%](255,0,0), P[c:15%](255,255,0), P[c:35%](0,255,0), P[c:65%](0,0,255), P[c:100%](127,0,255);
A[rainbow_wipe, 29] <= P[c:0%](255,0,0), P[c:13%](255,255,0), P[c:33%](0,255,0), P[c:63%](0,0,255), P[c:100%](127,0,255);
A[rainbow_wipe, 30] <= P[c:0%](255,0,0), P[c:11%](255,255,0), P[c:30%](0,255,0), P[c:61%](0,0,255), P[c:100%](127,0,255);
A[rainbow_wipe, 31] <= P[c:0%](255,0,0), P[c:9%](255,255,0), P[c:27%](0,255,0), P[c:59%](0,0,255), P[c:100%](127,0,255);
A[rainbow_wipe, 32] <= P[c:0%](255,0,0), P[c:7%](255,255,0), P[c:24%](0,255,0), P[c:57%](0,0,255), P[c:100%](127,0,255);
A[rainbow_wipe, 33] <= P[c:0%](255,0,0), P[c:5%](255,255,0), P[c:21%](0,255,0), P[c:55%](0,0,255), P[c:100%](127,0,255);
A[rainbow_wipe, 34] <= P[c:0%](255,0,0), P[c:3%](255,255,0), P[c:18%](0,255,0), P[c:53%](0,0,255), P[c:100%](127,0,255);
A[rainbow_wipe, 35] <= P[c:0%](255,0,0), P[c:3%](255,255,0), P[c:18%](0,255,0), P[c:53%](0,0,255), P[c:100%](127,0,255);
A[rainbow_wipe, 36] <= P[c:0%](255,0,0), P[c:3%](255,255,0), P[c:18%](0,255,0), P[c:53%](0,0,255), P[c:100%](127,0,255);
A[rainbow_wipe, 37] <= P[c:0%](255,0,0), P[c:10%](255,255,0), P[c:20%](0,255,0), P[c:60%](0,0,255), P[c:100%](127,0,255);
A[rainbow_wipe, 38] <= P[c:0%](255,0,0), P[c:20%](255,255,0), P[c:40%](0,255,0), P[c:70%](0,0,255), P[c:100%](127,0,255);
A[rainbow_wipe, 39] <= P[c:0%](255,0,0), P[c:25%](255,255,0), P[c:50%](0,255,0), P[c:75%](0,0,255), P[c:100%](127,0,255);
#A[rainbow_wipe, 40] <= P[c:0%](100,100,100), P[c:100%](100,100,100);

### File: kll/layouts/k-type/release.1.kll ###
# Animation Example Configuration
Name = "Release 1";
Version = 1.0;
Author = "HaaTa (Jacob Alexander) 2017";
KLL = 0.5;
# Modified Date
Date = 2017-09-06;
### Animations ###
# Rainbow wave
A[rainbow_wave] <= start, framedelay:3, framestretch, loop, replace:all, pfunc:interp;
A[rainbow_wave, 1] <= P[c:0%] (0,255,0), P[c:25%](255,255,0), P[c:50%](255,255,255), P[c:75%](127,0,255), P[c:100%](0,0,255);
A[rainbow_wave, 2] <= P[c:-24%](0,0,255), P[c:2%] (0,255,0), P[c:27%](255,255,0), P[c:52%](255,255,255), P[c:77%](127,0,255), P[c:102%](0,0,255);
A[rainbow_wave, 3] <= P[c:-22%](0,0,255), P[c:4%] (0,255,0), P[c:29%](255,255,0), P[c:54%](255,255,255), P[c:79%](127,0,255), P[c:104%](0,0,255);
A[rainbow_wave, 4] <= P[c:-20%](0,0,255), P[c:6%] (0,255,0), P[c:31%](255,255,0), P[c:56%](255,255,255), P[c:81%](127,0,255), P[c:106%](0,0,255);
A[rainbow_wave, 5] <= P[c:-18%](0,0,255), P[c:8%] (0,255,0), P[c:33%](255,255,0), P[c:58%](255,255,255), P[c:83%](127,0,255), P[c:108%](0,0,255);
A[rainbow_wave, 6] <= P[c:-16%](0,0,255), P[c:10%](0,255,0), P[c:35%](255,255,0), P[c:60%](255,255,255), P[c:85%](127,0,255), P[c:110%](0,0,255);
A[rainbow_wave, 7] <= P[c:-14%](0,0,255), P[c:12%](0,255,0), P[c:37%](255,255,0), P[c:62%](255,255,255), P[c:87%](127,0,255), P[c:112%](0,0,255);
A[rainbow_wave, 8] <= P[c:-12%](0,0,255), P[c:14%](0,255,0), P[c:39%](255,255,0), P[c:64%](255,255,255), P[c:89%](127,0,255), P[c:114%](0,0,255);
A[rainbow_wave, 9] <= P[c:-10%](0,0,255), P[c:16%](0,255,0), P[c:41%](255,255,0), P[c:66%](255,255,255), P[c:91%](127,0,255), P[c:116%](0,0,255);
A[rainbow_wave, 10] <= P[c:-8%] (0,0,255), P[c:18%](0,255,0), P[c:43%](255,255,0), P[c:68%](255,255,255), P[c:93%](127,0,255), P[c:118%](0,0,255);
A[rainbow_wave, 11] <= P[c:-6%] (0,0,255), P[c:20%](0,255,0), P[c:45%](255,255,0), P[c:70%](255,255,255), P[c:95%](127,0,255), P[c:120%](0,0,255);
A[rainbow_wave, 12] <= P[c:-4%] (0,0,255), P[c:22%](0,255,0), P[c:47%](255,255,0), P[c:72%](255,255,255), P[c:97%](127,0,255), P[c:122%](0,0,255);
A[rainbow_wave, 13] <= P[c:-2%] (0,0,255), P[c:24%](0,255,0), P[c:49%](255,255,0), P[c:74%](255,255,255), P[c:99%](127,0,255), P[c:124%](0,0,255);
A[rainbow_wave, 14] <= P[c:0%] (0,0,255), P[c:25%](0,255,0), P[c:50%](255,255,0), P[c:75%](255,255,255), P[c:100%](127,0,255);
A[rainbow_wave, 15] <= P[c:-24%](127,0,255), P[c:2%] (0,0,255), P[c:27%](0,255,0), P[c:52%](255,255,0), P[c:77%](255,255,255), P[c:102%](127,0,255);
A[rainbow_wave, 16] <= P[c:-22%](127,0,255), P[c:4%] (0,0,255), P[c:29%](0,255,0), P[c:54%](255,255,0), P[c:79%](255,255,255), P[c:104%](127,0,255);
A[rainbow_wave, 17] <= P[c:-20%](127,0,255), P[c:6%] (0,0,255), P[c:31%](0,255,0), P[c:56%](255,255,0), P[c:81%](255,255,255), P[c:106%](127,0,255);
A[rainbow_wave, 18] <= P[c:-18%](127,0,255), P[c:8%] (0,0,255), P[c:33%](0,255,0), P[c:58%](255,255,0), P[c:83%](255,255,255), P[c:108%](127,0,255);
A[rainbow_wave, 19] <= P[c:-16%](127,0,255), P[c:10%](0,0,255), P[c:35%](0,255,0), P[c:60%](255,255,0), P[c:85%](255,255,255), P[c:110%](127,0,255);
A[rainbow_wave, 20] <= P[c:-14%](127,0,255), P[c:12%](0,0,255), P[c:37%](0,255,0), P[c:62%](255,255,0), P[c:87%](255,255,255), P[c:112%](127,0,255);
A[rainbow_wave, 21] <= P[c:-12%](127,0,255), P[c:14%](0,0,255), P[c:39%](0,255,0), P[c:64%](255,255,0), P[c:89%](255,255,255), P[c:114%](127,0,255);
A[rainbow_wave, 22] <= P[c:-10%](127,0,255), P[c:16%](0,0,255), P[c:41%](0,255,0), P[c:66%](255,255,0), P[c:91%](255,255,255), P[c:116%](127,0,255);
A[rainbow_wave, 23] <= P[c:-8%] (127,0,255), P[c:18%](0,0,255), P[c:43%](0,255,0), P[c:68%](255,255,0), P[c:93%](255,255,255), P[c:118%](127,0,255);
A[rainbow_wave, 24] <= P[c:-6%] (127,0,255), P[c:20%](0,0,255), P[c:45%](0,255,0), P[c:70%](255,255,0), P[c:95%](255,255,255), P[c:120%](127,0,255);
A[rainbow_wave, 25] <= P[c:-4%] (127,0,255), P[c:22%](0,0,255), P[c:47%](0,255,0), P[c:72%](255,255,0), P[c:97%](255,255,255), P[c:122%](127,0,255);
A[rainbow_wave, 26] <= P[c:-2%] (127,0,255), P[c:24%](0,0,255), P[c:49%](0,255,0), P[c:74%](255,255,0), P[c:99%](255,255,255), P[c:124%](127,0,255);
A[rainbow_wave, 28] <= P[c:0%] (127,0,255), P[c:25%](0,0,255), P[c:50%](0,255,0), P[c:75%](255,255,0), P[c:100%](255,255,255);
A[rainbow_wave, 29] <= P[c:-24%](255,255,255), P[c:2%] (127,0,255), P[c:27%](0,0,255), P[c:52%](0,255,0), P[c:77%](255,255,0), P[c:102%](255,255,255);
A[rainbow_wave, 30] <= P[c:-22%](255,255,255), P[c:4%] (127,0,255), P[c:29%](0,0,255), P[c:54%](0,255,0), P[c:79%](255,255,0), P[c:104%](255,255,255);
A[rainbow_wave, 31] <= P[c:-20%](255,255,255), P[c:6%] (127,0,255), P[c:31%](0,0,255), P[c:56%](0,255,0), P[c:81%](255,255,0), P[c:106%](255,255,255);
A[rainbow_wave, 32] <= P[c:-18%](255,255,255), P[c:8%] (127,0,255), P[c:33%](0,0,255), P[c:58%](0,255,0), P[c:83%](255,255,0), P[c:108%](255,255,255);
A[rainbow_wave, 33] <= P[c:-16%](255,255,255), P[c:10%](127,0,255), P[c:35%](0,0,255), P[c:60%](0,255,0), P[c:85%](255,255,0), P[c:110%](255,255,255);
A[rainbow_wave, 34] <= P[c:-14%](255,255,255), P[c:12%](127,0,255), P[c:37%](0,0,255), P[c:62%](0,255,0), P[c:87%](255,255,0), P[c:112%](255,255,255);
A[rainbow_wave, 35] <= P[c:-12%](255,255,255), P[c:14%](127,0,255), P[c:39%](0,0,255), P[c:64%](0,255,0), P[c:89%](255,255,0), P[c:114%](255,255,255);
A[rainbow_wave, 36] <= P[c:-10%](255,255,255), P[c:16%](127,0,255), P[c:41%](0,0,255), P[c:66%](0,255,0), P[c:91%](255,255,0), P[c:116%](255,255,255);
A[rainbow_wave, 37] <= P[c:-8%] (255,255,255), P[c:18%](127,0,255), P[c:43%](0,0,255), P[c:68%](0,255,0), P[c:93%](255,255,0), P[c:118%](255,255,255);
A[rainbow_wave, 38] <= P[c:-6%] (255,255,255), P[c:20%](127,0,255), P[c:45%](0,0,255), P[c:70%](0,255,0), P[c:95%](255,255,0), P[c:120%](255,255,255);
A[rainbow_wave, 39] <= P[c:-4%] (255,255,255), P[c:22%](127,0,255), P[c:47%](0,0,255), P[c:72%](0,255,0), P[c:97%](255,255,0), P[c:122%](255,255,255);
A[rainbow_wave, 40] <= P[c:-2%] (255,255,255), P[c:24%](127,0,255), P[c:49%](0,0,255), P[c:74%](0,255,0), P[c:99%](255,255,0), P[c:124%](255,255,255);
A[rainbow_wave, 41] <= P[c:0%] (255,255,255), P[c:25%](127,0,255), P[c:50%](0,0,255), P[c:75%](0,255,0), P[c:100%](255,255,0);
A[rainbow_wave, 42] <= P[c:-24%](255,255,0), P[c:2%] (255,255,255), P[c:27%](127,0,255), P[c:52%](0,0,255), P[c:77%](0,255,0), P[c:102%](255,255,0);
A[rainbow_wave, 43] <= P[c:-22%](255,255,0), P[c:4%] (255,255,255), P[c:29%](127,0,255), P[c:54%](0,0,255), P[c:79%](0,255,0), P[c:104%](255,255,0);
A[rainbow_wave, 44] <= P[c:-20%](255,255,0), P[c:6%] (255,255,255), P[c:31%](127,0,255), P[c:56%](0,0,255), P[c:81%](0,255,0), P[c:106%](255,255,0);
A[rainbow_wave, 45] <= P[c:-18%](255,255,0), P[c:8%] (255,255,255), P[c:33%](127,0,255), P[c:58%](0,0,255), P[c:83%](0,255,0), P[c:108%](255,255,0);
A[rainbow_wave, 46] <= P[c:-16%](255,255,0), P[c:10%](255,255,255), P[c:35%](127,0,255), P[c:60%](0,0,255), P[c:85%](0,255,0), P[c:110%](255,255,0);
A[rainbow_wave, 47] <= P[c:-14%](255,255,0), P[c:12%](255,255,255), P[c:37%](127,0,255), P[c:62%](0,0,255), P[c:87%](0,255,0), P[c:112%](255,255,0);
A[rainbow_wave, 48] <= P[c:-12%](255,255,0), P[c:14%](255,255,255), P[c:39%](127,0,255), P[c:64%](0,0,255), P[c:89%](0,255,0), P[c:114%](255,255,0);
A[rainbow_wave, 49] <= P[c:-10%](255,255,0), P[c:16%](255,255,255), P[c:41%](127,0,255), P[c:66%](0,0,255), P[c:91%](0,255,0), P[c:116%](255,255,0);
A[rainbow_wave, 50] <= P[c:-8%] (255,255,0), P[c:18%](255,255,255), P[c:43%](127,0,255), P[c:68%](0,0,255), P[c:93%](0,255,0), P[c:118%](255,255,0);
A[rainbow_wave, 51] <= P[c:-6%] (255,255,0), P[c:20%](255,255,255), P[c:45%](127,0,255), P[c:70%](0,0,255), P[c:95%](0,255,0), P[c:120%](255,255,0);
A[rainbow_wave, 52] <= P[c:-4%] (255,255,0), P[c:22%](255,255,255), P[c:47%](127,0,255), P[c:72%](0,0,255), P[c:97%](0,255,0), P[c:122%](255,255,0);
A[rainbow_wave, 53] <= P[c:-2%] (255,255,0), P[c:24%](255,255,255), P[c:49%](127,0,255), P[c:74%](0,0,255), P[c:99%](0,255,0), P[c:124%](255,255,0);
A[rainbow_wave, 54] <= P[c:0%] (255,255,0), P[c:25%](255,255,255), P[c:50%](127,0,255), P[c:75%](0,0,255), P[c:100%](0,255,0);
A[rainbow_wave, 55] <= P[c:-24%](0,255,0), P[c:2%] (255,255,0), P[c:27%](255,255,255), P[c:52%](127,0,255), P[c:77%](0,0,255), P[c:102%](0,255,0);
A[rainbow_wave, 56] <= P[c:-22%](0,255,0), P[c:4%] (255,255,0), P[c:29%](255,255,255), P[c:54%](127,0,255), P[c:79%](0,0,255), P[c:104%](0,255,0);
A[rainbow_wave, 57] <= P[c:-20%](0,255,0), P[c:6%] (255,255,0), P[c:31%](255,255,255), P[c:56%](127,0,255), P[c:81%](0,0,255), P[c:106%](0,255,0);
A[rainbow_wave, 58] <= P[c:-18%](0,255,0), P[c:8%] (255,255,0), P[c:33%](255,255,255), P[c:58%](127,0,255), P[c:83%](0,0,255), P[c:108%](0,255,0);
A[rainbow_wave, 59] <= P[c:-16%](0,255,0), P[c:10%](255,255,0), P[c:35%](255,255,255), P[c:60%](127,0,255), P[c:85%](0,0,255), P[c:110%](0,255,0);
A[rainbow_wave, 60] <= P[c:-14%](0,255,0), P[c:12%](255,255,0), P[c:37%](255,255,255), P[c:62%](127,0,255), P[c:87%](0,0,255), P[c:112%](0,255,0);
A[rainbow_wave, 61] <= P[c:-12%](0,255,0), P[c:14%](255,255,0), P[c:39%](255,255,255), P[c:64%](127,0,255), P[c:89%](0,0,255), P[c:114%](0,255,0);
A[rainbow_wave, 62] <= P[c:-10%](0,255,0), P[c:16%](255,255,0), P[c:41%](255,255,255), P[c:66%](127,0,255), P[c:91%](0,0,255), P[c:116%](0,255,0);
A[rainbow_wave, 63] <= P[c:-8%] (0,255,0), P[c:18%](255,255,0), P[c:43%](255,255,255), P[c:68%](127,0,255), P[c:93%](0,0,255), P[c:118%](0,255,0);
A[rainbow_wave, 64] <= P[c:-6%] (0,255,0), P[c:20%](255,255,0), P[c:45%](255,255,255), P[c:70%](127,0,255), P[c:95%](0,0,255), P[c:120%](0,255,0);
A[rainbow_wave, 65] <= P[c:-4%] (0,255,0), P[c:22%](255,255,0), P[c:47%](255,255,255), P[c:72%](127,0,255), P[c:97%](0,0,255), P[c:122%](0,255,0);
A[rainbow_wave, 66] <= P[c:-2%] (0,255,0), P[c:24%](255,255,0), P[c:49%](255,255,255), P[c:74%](127,0,255), P[c:99%](0,0,255), P[c:124%](0,255,0);

### File: kll/layouts/k-type/streak.kll ###
# Streak Animation
Name = Streak Animation;
Version = 0.2;
Author = "HaaTa (Jacob Alexander) 2017";
KLL = 0.5;
# Modified Date
Date = 2017-10-08;
### Animations ###
## Usage Examples ##
# U["1"] :+ A[streak](framedelay:1, loops:1);
# U["2"] :+ A[streak](framedelay:1, loops:2);
# U["3"] :+ A[streak](framedelay:1, loops:3);
# U["4"] :+ A[streak](framedelay:1, loops:4);
# U["5"] :+ A[streak](framedelay:1, loops:5);
# U["6"] :+ A[streak](framedelay:1, loops:6);
# U["7"] :+ A[streak](framedelay:1, loops:7);
# U["8"] :+ A[streak](framedelay:1, loops:8);
# U["9"] :+ A[streak](framedelay:1, loops:9);
# U["0"] :+ A[streak](loops:1);
# Underlighting streak
A[streak] <= loops:1, replace:stack;
A[streak, 1] <=
P[102](+:91,+:50,+:86),
P[103](+:255,+:255,+:255),
P[104](-:55,-:100,-:100),
P[105](-:100,-:100,-:100),
P[106](-:100,-:100,-:100);
A[streak, 2] <=
P[101](+:91,+:50,+:86),
P[102](+:255,+:255,+:255),
P[103](-:55,-:100,-:100),
P[104](-:100,-:100,-:100),
P[105](-:100,-:100,-:100);
A[streak, 3] <=
P[100](+:91,+:50,+:86),
P[101](+:255,+:255,+:255),
P[102](-:55,-:100,-:100),
P[103](-:100,-:100,-:100),
P[104](-:100,-:100,-:100);
A[streak, 4] <=
P[99](+:91,+:50,+:86),
P[100](+:255,+:255,+:255),
P[101](-:55,-:100,-:100),
P[102](-:100,-:100,-:100),
P[103](-:100,-:100,-:100);
A[streak, 5] <=
P[98](+:91,+:50,+:86),
P[99](+:255,+:255,+:255),
P[100](-:55,-:100,-:100),
P[101](-:100,-:100,-:100),
P[102](-:100,-:100,-:100);
A[streak, 6] <=
P[97](+:91,+:50,+:86),
P[98](+:255,+:255,+:255),
P[99](-:55,-:100,-:100),
P[100](-:100,-:100,-:100),
P[101](-:100,-:100,-:100);
A[streak, 7] <=
P[96](+:91,+:50,+:86),
P[97](+:255,+:255,+:255),
P[98](-:55,-:100,-:100),
P[99](-:100,-:100,-:100),
P[100](-:100,-:100,-:100);
A[streak, 8] <=
P[95](+:91,+:50,+:86),
P[96](+:255,+:255,+:255),
P[97](-:55,-:100,-:100),
P[98](-:100,-:100,-:100),
P[99](-:100,-:100,-:100);
A[streak, 9] <=
P[94](+:91,+:50,+:86),
P[95](+:255,+:255,+:255),
P[96](-:55,-:100,-:100),
P[97](-:100,-:100,-:100),
P[98](-:100,-:100,-:100);
A[streak, 10] <=
P[93](+:91,+:50,+:86),
P[94](+:255,+:255,+:255),
P[95](-:55,-:100,-:100),
P[96](-:100,-:100,-:100),
P[97](-:100,-:100,-:100);
A[streak, 11] <=
P[92](+:91,+:50,+:86),
P[93](+:255,+:255,+:255),
P[94](-:55,-:100,-:100),
P[95](-:100,-:100,-:100),
P[96](-:100,-:100,-:100);
A[streak, 12] <=
P[91](+:91,+:50,+:86),
P[92](+:255,+:255,+:255),
P[93](-:55,-:100,-:100),
P[94](-:100,-:100,-:100),
P[95](-:100,-:100,-:100);
A[streak, 13] <=
P[90](+:91,+:50,+:86),
P[91](+:255,+:255,+:255),
P[92](-:55,-:100,-:100),
P[93](-:100,-:100,-:100),
P[94](-:100,-:100,-:100);
A[streak, 14] <=
P[89](+:91,+:50,+:86),
P[90](+:255,+:255,+:255),
P[91](-:55,-:100,-:100),
P[92](-:100,-:100,-:100),
P[93](-:100,-:100,-:100);
A[streak, 15] <=
P[88](+:91,+:50,+:86),
P[89](+:255,+:255,+:255),
P[90](-:55,-:100,-:100),
P[91](-:100,-:100,-:100),
P[92](-:100,-:100,-:100);
A[streak, 16] <=
P[119](+:91,+:50,+:86),
P[88](+:255,+:255,+:255),
P[89](-:55,-:100,-:100),
P[90](-:100,-:100,-:100),
P[91](-:100,-:100,-:100);
A[streak, 17] <=
P[118](+:91,+:50,+:86),
P[119](+:255,+:255,+:255),
P[88](-:55,-:100,-:100),
P[89](-:100,-:100,-:100),
P[90](-:100,-:100,-:100);
A[streak, 18] <=
P[117](+:91,+:50,+:86),
P[118](+:255,+:255,+:255),
P[119](-:55,-:100,-:100),
P[88](-:100,-:100,-:100),
P[89](-:100,-:100,-:100);
A[streak, 19] <=
P[116](+:91,+:50,+:86),
P[117](+:255,+:255,+:255),
P[118](-:55,-:100,-:100),
P[119](-:100,-:100,-:100),
P[88](-:100,-:100,-:100);
A[streak, 20] <=
P[115](+:91,+:50,+:86),
P[116](+:255,+:255,+:255),
P[117](-:55,-:100,-:100),
P[118](-:100,-:100,-:100),
P[119](-:100,-:100,-:100);
A[streak, 21] <=
P[114](+:91,+:50,+:86),
P[115](+:255,+:255,+:255),
P[116](-:55,-:100,-:100),
P[117](-:100,-:100,-:100),
P[118](-:100,-:100,-:100);
A[streak, 22] <=
P[113](+:91,+:50,+:86),
P[114](+:255,+:255,+:255),
P[115](-:55,-:100,-:100),
P[116](-:100,-:100,-:100),
P[117](-:100,-:100,-:100);
A[streak, 23] <=
P[112](+:91,+:50,+:86),
P[113](+:255,+:255,+:255),
P[114](-:55,-:100,-:100),
P[115](-:100,-:100,-:100),
P[116](-:100,-:100,-:100);
A[streak, 24] <=
P[111](+:91,+:50,+:86),
P[112](+:255,+:255,+:255),
P[113](-:55,-:100,-:100),
P[114](-:100,-:100,-:100),
P[115](-:100,-:100,-:100);
A[streak, 25] <=
P[110](+:91,+:50,+:86),
P[111](+:255,+:255,+:255),
P[112](-:55,-:100,-:100),
P[113](-:100,-:100,-:100),
P[114](-:100,-:100,-:100);
A[streak, 26] <=
P[109](+:91,+:50,+:86),
P[110](+:255,+:255,+:255),
P[111](-:55,-:100,-:100),
P[112](-:100,-:100,-:100),
P[113](-:100,-:100,-:100);
A[streak, 27] <=
P[108](+:91,+:50,+:86),
P[109](+:255,+:255,+:255),
P[110](-:55,-:100,-:100),
P[111](-:100,-:100,-:100),
P[112](-:100,-:100,-:100);
A[streak, 28] <=
P[107](+:91,+:50,+:86),
P[108](+:255,+:255,+:255),
P[109](-:55,-:100,-:100),
P[110](-:100,-:100,-:100),
P[111](-:100,-:100,-:100);
A[streak, 29] <=
P[106](+:91,+:50,+:86),
P[107](+:255,+:255,+:255),
P[108](-:55,-:100,-:100),
P[109](-:100,-:100,-:100),
P[110](-:100,-:100,-:100);
A[streak, 30] <=
P[105](+:91,+:50,+:86),
P[106](+:255,+:255,+:255),
P[107](-:55,-:100,-:100),
P[108](-:100,-:100,-:100),
P[109](-:100,-:100,-:100);
A[streak, 31] <=
P[104](+:91,+:50,+:86),
P[105](+:255,+:255,+:255),
P[106](-:55,-:100,-:100),
P[107](-:100,-:100,-:100),
P[108](-:100,-:100,-:100);
A[streak, 32] <=
P[103](+:91,+:50,+:86),
P[104](+:255,+:255,+:255),
P[105](-:55,-:100,-:100),
P[106](-:100,-:100,-:100),
P[107](-:100,-:100,-:100);
# Fade-out
A[streak, 33] <=
P[103](+:255,+:255,+:255),
P[104](-:55,-:100,-:100),
P[105](-:100,-:100,-:100),
P[106](-:200,-:200,-:200);
A[streak, 33] <=
P[103](-:55,-:100,-:100),
P[104](-:100,-:100,-:100),
P[105](-:200,-:200,-:200);
A[streak, 34] <=
P[103](-:100,-:100,-:100),
P[104](-:200,-:200,-:200);
A[streak, 35] <=
P[103](-:200,-:200,-:200);
A[streak, 36] <=
P[106](0,0,0);

### File: kll/layouts/k-type/unset_v1.kll ###
# Unset Animations Example Configuration
Name = Unset Animations Example;
Version = 0.1;
Author = "HaaTa (Jacob Alexander) 2017";
KLL = 0.5;
# Modified Date
Date = 2017-02-08;
U"A" : U"A";
U"B" : U"B";
U"C" : U"C";
U"D" : U"D";
U"E" : U"E";
U"F" : U"F";
U"G" : U"G";
U"H" : U"H";
U"I" : U"I";
U"J" : U"J";
U"K" : U"K";
U"L" : U"L";
U"M" : U"M";
U"N" : U"N";
U"O" : U"O";
U"P" : U"P";
U"Q" : U"Q";
U"R" : U"R";
U"S" : U"S";
U"T" : U"T";
U"U" : U"U";
U"V" : U"V";
U"W" : U"W";
U"X" : U"X";
U"Y" : U"Y";
U"Z" : U"Z";
U"Backtick" : U"Backtick";
U"1" : U"1";
U"2" : U"2";
U"3" : U"3";
U"4" : U"4";
U"5" : U"5";
U"6" : U"6";
U"7" : U"7";
U"8" : U"8";
U"9" : U"9";
U"0" : U"0";
U"Minus" : U"Minus";
U"Equals" : U"Equals";
U"Backspace" : U"Backspace";
U"Insert" : U"Insert";
U"Home" : U"Home";
U"PageUp" : U"PageUp";
U"Delete" : U"Delete";
U"End" : U"End";
U"PageDown" : U"PageDown";
U"Tab" : U"Tab";
U"LBrace" : U"LBrace";
U"RBrace" : U"RBrace";
U"Backslash" : U"Backslash";
U"CapsLock" : U"CapsLock";
U"Semicolon" : U"Semicolon";
U"Quote" : U"Quote";
U"Enter" : U"Enter";
U"LShift" : U"LShift";
U"Comma" : U"Comma";
U"Period" : U"Period";
U"Slash" : U"Slash";
U"RShift" : U"RShift";
U"LCtrl" : U"LCtrl";
U"LGui" : U"LGui";
U"LAlt" : U"LAlt";
U"Space" : U"Space";
U"RAlt" : U"RAlt";
U"RGui" : U"RGui";
# These are used as the layer locks; don't unset them
#U"App" : U"App";
#U"RCtrl" : U"RCtrl";
U"Esc" : U"Esc";
U"F1" : U"F1";
U"F2" : U"F2";
U"F3" : U"F3";
U"F4" : U"F4";
U"F5" : U"F5";
U"F6" : U"F6";
U"F7" : U"F7";
U"F8" : U"F8";
U"F9" : U"F9";
U"F10" : U"F10";
U"F11" : U"F11";
U"F12" : U"F12";
U"Print" : U"Print";
U"ScrollLock" : U"ScrollLock";
U"Pause" : U"Pause";
U"Up" : U"Up";
U"Down" : U"Down";
U"Left" : U"Left";
U"Right" : U"Right";

### File: kll/layouts/k-type/vmw.kll ###
# Animation Example Configuration
Name = VMW Demo;
Version = 0.1;
Author = "HaaTa (Jacob Alexander) 2017";
KLL = 0.5;
# Modified Date
Date = 2017-08-23;
animation_test_layout => animation_test_layout_define;
animation_test_layout = 1;
### Animations ###
# Dark Ripple
#A[dark_ripple] <= replace:basic;
#A[dark_ripple, 1] <=
# Relative animation test
A[relative_add] <= replace:basic, loops:2, framedelay:1;
A[relative_add, 1] <= P[r:i](+:10,+:10,+:10), P[c:i](+:10,+:10,+:10);
A[relative_add, 2] <= P[r:i](+:10,+:10,+:10), P[c:i](+:10,+:10,+:10);
A[relative_add, 3] <= P[r:i](+:10,+:10,+:10), P[c:i](+:10,+:10,+:10);
A[relative_add, 4] <= P[r:i](+:10,+:10,+:10), P[c:i](+:10,+:10,+:10);
A[relative_add, 5] <= P[r:i](+:10,+:10,+:10), P[c:i](+:10,+:10,+:10);
A[relative_add, 6] <= P[r:i](+:10,+:10,+:10), P[c:i](+:10,+:10,+:10);
A[relative_add, 7] <= P[r:i](+:10,+:10,+:10), P[c:i](+:10,+:10,+:10);
A[relative_add, 8] <= P[r:i](+:10,+:10,+:10), P[c:i](+:10,+:10,+:10);
A[relative_add, 9] <= P[r:i](+:10,+:10,+:10), P[c:i](+:10,+:10,+:10);
A[relative_add, 10] <= P[r:i](+:10,+:10,+:10), P[c:i](+:10,+:10,+:10);
A[relative_add, 11] <= P[r:i](-:10,-:10,-:10), P[c:i](-:10,-:10,-:10);
A[relative_add, 12] <= P[r:i](-:10,-:10,-:10), P[c:i](-:10,-:10,-:10);
A[relative_add, 13] <= P[r:i](-:10,-:10,-:10), P[c:i](-:10,-:10,-:10);
A[relative_add, 14] <= P[r:i](-:10,-:10,-:10), P[c:i](-:10,-:10,-:10);
A[relative_add, 15] <= P[r:i](-:10,-:10,-:10), P[c:i](-:10,-:10,-:10);
A[relative_add, 16] <= P[r:i](-:10,-:10,-:10), P[c:i](-:10,-:10,-:10);
A[relative_add, 17] <= P[r:i](-:10,-:10,-:10), P[c:i](-:10,-:10,-:10);
A[relative_add, 18] <= P[r:i](-:10,-:10,-:10), P[c:i](-:10,-:10,-:10);
A[relative_add, 19] <= P[r:i](-:10,-:10,-:10), P[c:i](-:10,-:10,-:10);
A[relative_add, 20] <= P[r:i](-:10,-:10,-:10), P[c:i](-:10,-:10,-:10);
#A[relative_add, 21] <= P[r:i,c:i](50,50,50);
#A[relative_add, 2] <= P[c:i](+100,+100,+100);
#A[relative_add, 3] <= P[r:i,c:i](+100,+100,+100);
# TODO Move to U"CapsLock" when working
A[caps_lock] <= loop, replace:all;
A[caps_lock, 1] <= S[0x36](+:50,+:50,+:50);
A[caps_lock, 2] <= S[0x36](+:50,+:50,+:50);
A[caps_lock, 3] <= S[0x36](+:50,+:50,+:50);
A[caps_lock, 4] <= S[0x36](+:50,+:50,+:50);
A[caps_lock, 5] <= S[0x36](-:20,-:20,-:20);
A[caps_lock, 6] <= S[0x36](-:20,-:20,-:20);
A[caps_lock, 7] <= S[0x36](-:20,-:20,-:20);
A[caps_lock, 5] <= S[0x36](-:20,-:20,-:20);
A[caps_lock, 6] <= S[0x36](-:20,-:20,-:20);
A[caps_lock, 8] <= S[0x36](-:20,-:20,-:20);
A[caps_lock, 9] <= S[0x36](-:20,-:20,-:20);
A[caps_lock, 10] <= S[0x36](-:20,-:20,-:20);
A[caps_lock, 11] <= S[0x36](-:20,-:20,-:20);
A[caps_lock, 12] <= S[0x36](-:20,-:20,-:20);
# TODO Move to U"ScrollLock" when working
A[scroll_lock] <= loop, replace:all;
A[scroll_lock, 1] <= S[0xF](+:50,+:50,+:50);
A[scroll_lock, 2] <= S[0xF](+:50,+:50,+:50);
A[scroll_lock, 3] <= S[0xF](+:50,+:50,+:50);
A[scroll_lock, 4] <= S[0xF](+:50,+:50,+:50);
A[scroll_lock, 5] <= S[0xF](-:20,-:20,-:20);
A[scroll_lock, 6] <= S[0xF](-:20,-:20,-:20);
A[scroll_lock, 7] <= S[0xF](-:20,-:20,-:20);
A[scroll_lock, 5] <= S[0xF](-:20,-:20,-:20);
A[scroll_lock, 6] <= S[0xF](-:20,-:20,-:20);
A[scroll_lock, 8] <= S[0xF](-:20,-:20,-:20);
A[scroll_lock, 9] <= S[0xF](-:20,-:20,-:20);
A[scroll_lock, 10] <= S[0xF](-:20,-:20,-:20);
A[scroll_lock, 11] <= S[0xF](-:20,-:20,-:20);
A[scroll_lock, 12] <= S[0xF](-:20,-:20,-:20);
# Lock Keys
A[lock_event] <= loops:1, replace:all, framedelay:1, replace:basic;
A[lock_event, 1] <= P[r:i,c:i](+:100,+:100,+:100);
A[lock_event, 2] <= P[r:i,c:i](+:100,+:100,+:100);
A[lock_event, 3] <= P[r:i,c:i](+:100,+:100,+:100);
A[lock_event, 4] <= P[r:i,c:i](+:0,+:0,+:0);
A[lock_event, 5] <= P[r:i,c:i](+:0,+:0,+:0);
A[lock_event, 6] <= P[r:i,c:i](+:0,+:0,+:0);
A[lock_event, 7] <= P[r:i,c:i](+:0,+:0,+:0);
A[lock_event, 8] <= P[r:i,c:i](+:0,+:0,+:0);
A[lock_event, 9] <= P[r:i,c:i](+:0,+:0,+:0);
A[lock_event, 10] <= P[r:i,c:i](+:0,+:0,+:0);
A[lock_event, 11] <= P[r:i,c:i](+:0,+:0,+:0);
A[lock_event, 12] <= P[r:i,c:i](+:0,+:0,+:0);
A[lock_event, 13] <= P[r:i,c:i](-:10,-:10,-:10);
A[lock_event, 14] <= P[r:i,c:i](-:10,-:10,-:10);
A[lock_event, 15] <= P[r:i,c:i](-:10,-:10,-:10);
A[lock_event, 16] <= P[r:i,c:i](-:10,-:10,-:10);
A[lock_event, 17] <= P[r:i,c:i](-:10,-:10,-:10);
A[lock_event, 18] <= P[r:i,c:i](-:10,-:10,-:10);
A[lock_event, 19] <= P[r:i,c:i](-:10,-:10,-:10);
A[lock_event, 20] <= P[r:i,c:i](-:10,-:10,-:10);
A[lock_event, 21] <= P[r:i,c:i](-:10,-:10,-:10);
A[lock_event, 22] <= P[r:i,c:i](-:10,-:10,-:10);
A[lock_event, 23] <= P[r:i,c:i](-:10,-:10,-:10);
A[lock_event, 24] <= P[r:i,c:i](-:10,-:10,-:10);
A[lock_event, 25] <= P[r:i,c:i](-:10,-:10,-:10);
A[lock_event, 26] <= P[r:i,c:i](-:10,-:10,-:10);
A[lock_event, 27] <= P[r:i,c:i](-:10,-:10,-:10);
A[lock_event, 28] <= P[r:i,c:i](-:10,-:10,-:10);
A[lock_event, 29] <= P[r:i,c:i](-:10,-:10,-:10);
# Underlighting streak
A[under_streak] <= loops:1, replace:stack;
A[under_streak, 1] <=
P[102](+:91,+:50,+:86),
P[103](+:255,+:255,+:255),
P[104](-:55,-:100,-:100),
P[105](-:100,-:100,-:100),
P[106](-:100,-:100,-:100);
A[under_streak, 2] <=
P[101](+:91,+:50,+:86),
P[102](+:255,+:255,+:255),
P[103](-:55,-:100,-:100),
P[104](-:100,-:100,-:100),
P[105](-:100,-:100,-:100);
A[under_streak, 3] <=
P[100](+:91,+:50,+:86),
P[101](+:255,+:255,+:255),
P[102](-:55,-:100,-:100),
P[103](-:100,-:100,-:100),
P[104](-:100,-:100,-:100);
A[under_streak, 4] <=
P[99](+:91,+:50,+:86),
P[100](+:255,+:255,+:255),
P[101](-:55,-:100,-:100),
P[102](-:100,-:100,-:100),
P[103](-:100,-:100,-:100);
A[under_streak, 5] <=
P[98](+:91,+:50,+:86),
P[99](+:255,+:255,+:255),
P[100](-:55,-:100,-:100),
P[101](-:100,-:100,-:100),
P[102](-:100,-:100,-:100);
A[under_streak, 6] <=
P[97](+:91,+:50,+:86),
P[98](+:255,+:255,+:255),
P[99](-:55,-:100,-:100),
P[100](-:100,-:100,-:100),
P[101](-:100,-:100,-:100);
A[under_streak, 7] <=
P[128](+:91,+:50,+:86),
P[97](+:255,+:255,+:255),
P[98](-:55,-:100,-:100),
P[99](-:100,-:100,-:100),
P[100](-:100,-:100,-:100);
A[under_streak, 8] <=
P[127](+:91,+:50,+:86),
P[128](+:255,+:255,+:255),
P[97](-:55,-:100,-:100),
P[98](-:100,-:100,-:100),
P[99](-:100,-:100,-:100);
A[under_streak, 9] <=
P[126](+:91,+:50,+:86),
P[127](+:255,+:255,+:255),
P[128](-:55,-:100,-:100),
P[97](-:100,-:100,-:100),
P[98](-:100,-:100,-:100);
A[under_streak, 10] <=
P[125](+:91,+:50,+:86),
P[126](+:255,+:255,+:255),
P[127](-:55,-:100,-:100),
P[128](-:100,-:100,-:100),
P[97](-:100,-:100,-:100);
A[under_streak, 11] <=
P[124](+:91,+:50,+:86),
P[125](+:255,+:255,+:255),
P[126](-:55,-:100,-:100),
P[127](-:100,-:100,-:100),
P[128](-:100,-:100,-:100);
A[under_streak, 12] <=
P[123](+:91,+:50,+:86),
P[124](+:255,+:255,+:255),
P[125](-:55,-:100,-:100),
P[126](-:100,-:100,-:100),
P[127](-:100,-:100,-:100);
A[under_streak, 13] <=
P[122](+:91,+:50,+:86),
P[123](+:255,+:255,+:255),
P[124](-:55,-:100,-:100),
P[125](-:100,-:100,-:100),
P[126](-:100,-:100,-:100);
A[under_streak, 14] <=
P[121](+:91,+:50,+:86),
P[122](+:255,+:255,+:255),
P[123](-:55,-:100,-:100),
P[124](-:100,-:100,-:100),
P[125](-:100,-:100,-:100);
A[under_streak, 15] <=
P[120](+:91,+:50,+:86),
P[121](+:255,+:255,+:255),
P[122](-:55,-:100,-:100),
P[123](-:100,-:100,-:100),
P[124](-:100,-:100,-:100);
A[under_streak, 16] <=
P[119](+:91,+:50,+:86),
P[120](+:255,+:255,+:255),
P[121](-:55,-:100,-:100),
P[122](-:100,-:100,-:100),
P[123](-:100,-:100,-:100);
A[under_streak, 17] <=
P[118](+:91,+:50,+:86),
P[119](+:255,+:255,+:255),
P[120](-:55,-:100,-:100),
P[121](-:100,-:100,-:100),
P[122](-:100,-:100,-:100);
A[under_streak, 18] <=
P[117](+:91,+:50,+:86),
P[118](+:255,+:255,+:255),
P[119](-:55,-:100,-:100),
P[120](-:100,-:100,-:100),
P[121](-:100,-:100,-:100);
A[under_streak, 19] <=
P[116](+:91,+:50,+:86),
P[117](+:255,+:255,+:255),
P[118](-:55,-:100,-:100),
P[119](-:100,-:100,-:100),
P[120](-:100,-:100,-:100);
A[under_streak, 20] <=
P[115](+:91,+:50,+:86),
P[116](+:255,+:255,+:255),
P[117](-:55,-:100,-:100),
P[118](-:100,-:100,-:100),
P[119](-:100,-:100,-:100);
A[under_streak, 21] <=
P[114](+:91,+:50,+:86),
P[115](+:255,+:255,+:255),
P[116](-:55,-:100,-:100),
P[117](-:100,-:100,-:100),
P[118](-:100,-:100,-:100);
A[under_streak, 22] <=
P[113](+:91,+:50,+:86),
P[114](+:255,+:255,+:255),
P[115](-:55,-:100,-:100),
P[116](-:100,-:100,-:100),
P[117](-:100,-:100,-:100);
A[under_streak, 23] <=
P[112](+:91,+:50,+:86),
P[113](+:255,+:255,+:255),
P[114](-:55,-:100,-:100),
P[115](-:100,-:100,-:100),
P[116](-:100,-:100,-:100);
A[under_streak, 24] <=
P[111](+:91,+:50,+:86),
P[112](+:255,+:255,+:255),
P[113](-:55,-:100,-:100),
P[114](-:100,-:100,-:100),
P[115](-:100,-:100,-:100);
A[under_streak, 25] <=
P[110](+:91,+:50,+:86),
P[111](+:255,+:255,+:255),
P[112](-:55,-:100,-:100),
P[113](-:100,-:100,-:100),
P[114](-:100,-:100,-:100);
A[under_streak, 26] <=
P[109](+:91,+:50,+:86),
P[110](+:255,+:255,+:255),
P[111](-:55,-:100,-:100),
P[112](-:100,-:100,-:100),
P[113](-:100,-:100,-:100);
A[under_streak, 27] <=
P[108](+:91,+:50,+:86),
P[109](+:255,+:255,+:255),
P[110](-:55,-:100,-:100),
P[111](-:100,-:100,-:100),
P[112](-:100,-:100,-:100);
A[under_streak, 28] <=
P[107](+:91,+:50,+:86),
P[108](+:255,+:255,+:255),
P[109](-:55,-:100,-:100),
P[110](-:100,-:100,-:100),
P[111](-:100,-:100,-:100);
A[under_streak, 29] <=
P[106](+:91,+:50,+:86),
P[107](+:255,+:255,+:255),
P[108](-:55,-:100,-:100),
P[109](-:100,-:100,-:100),
P[110](-:100,-:100,-:100);
A[under_streak, 30] <=
P[105](+:91,+:50,+:86),
P[106](+:255,+:255,+:255),
P[107](-:55,-:100,-:100),
P[108](-:100,-:100,-:100),
P[109](-:100,-:100,-:100);
A[under_streak, 31] <=
P[104](+:91,+:50,+:86),
P[105](+:255,+:255,+:255),
P[106](-:55,-:100,-:100),
P[107](-:100,-:100,-:100),
P[108](-:100,-:100,-:100);
A[under_streak, 32] <=
P[103](+:91,+:50,+:86),
P[104](+:255,+:255,+:255),
P[105](-:55,-:100,-:100),
P[106](-:100,-:100,-:100),
P[107](-:100,-:100,-:100);
# Fade-out
A[under_streak, 33] <=
P[103](+:255,+:255,+:255),
P[104](-:55,-:100,-:100),
P[105](-:100,-:100,-:100),
P[106](-:200,-:200,-:200);
A[under_streak, 34] <=
P[103](-:55,-:100,-:100),
P[104](-:100,-:100,-:100),
P[105](-:200,-:200,-:200);
A[under_streak, 35] <=
P[103](-:100,-:100,-:100),
P[104](-:200,-:200,-:200);
A[under_streak, 36] <=
P[103](-:200,-:200,-:200);
A[under_streak, 37] <=
P[106](0,0,0);
# Rainbow wave
A[rainbow_wave] <= start, framedelay:3, framestretch, loop, replace:all, pfunc:interp;
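# Note: P[c:x%] addresses pixels by horizontal position as a percentage of the
# matrix width, and pfunc:interp fills in the pixels between the listed stops;
# the stops below 0% and above 100% are presumably there so the interpolated
# gradient wraps cleanly past the board edges as the wave scrolls.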
A[rainbow_wave, 1] <= P[c:0%] (0,255,0), P[c:25%](255,255,0), P[c:50%](255,255,255), P[c:75%](127,0,255), P[c:100%](0,0,255);
A[rainbow_wave, 2] <= P[c:-24%](0,0,255), P[c:2%] (0,255,0), P[c:27%](255,255,0), P[c:52%](255,255,255), P[c:77%](127,0,255), P[c:102%](0,0,255);
A[rainbow_wave, 3] <= P[c:-22%](0,0,255), P[c:4%] (0,255,0), P[c:29%](255,255,0), P[c:54%](255,255,255), P[c:79%](127,0,255), P[c:104%](0,0,255);
A[rainbow_wave, 4] <= P[c:-20%](0,0,255), P[c:6%] (0,255,0), P[c:31%](255,255,0), P[c:56%](255,255,255), P[c:81%](127,0,255), P[c:106%](0,0,255);
A[rainbow_wave, 5] <= P[c:-18%](0,0,255), P[c:8%] (0,255,0), P[c:33%](255,255,0), P[c:58%](255,255,255), P[c:83%](127,0,255), P[c:108%](0,0,255);
A[rainbow_wave, 6] <= P[c:-16%](0,0,255), P[c:10%](0,255,0), P[c:35%](255,255,0), P[c:60%](255,255,255), P[c:85%](127,0,255), P[c:110%](0,0,255);
A[rainbow_wave, 7] <= P[c:-14%](0,0,255), P[c:12%](0,255,0), P[c:37%](255,255,0), P[c:62%](255,255,255), P[c:87%](127,0,255), P[c:112%](0,0,255);
A[rainbow_wave, 8] <= P[c:-12%](0,0,255), P[c:14%](0,255,0), P[c:39%](255,255,0), P[c:64%](255,255,255), P[c:89%](127,0,255), P[c:114%](0,0,255);
A[rainbow_wave, 9] <= P[c:-10%](0,0,255), P[c:16%](0,255,0), P[c:41%](255,255,0), P[c:66%](255,255,255), P[c:91%](127,0,255), P[c:116%](0,0,255);
A[rainbow_wave, 10] <= P[c:-8%] (0,0,255), P[c:18%](0,255,0), P[c:43%](255,255,0), P[c:68%](255,255,255), P[c:93%](127,0,255), P[c:118%](0,0,255);
A[rainbow_wave, 11] <= P[c:-6%] (0,0,255), P[c:20%](0,255,0), P[c:45%](255,255,0), P[c:70%](255,255,255), P[c:95%](127,0,255), P[c:120%](0,0,255);
A[rainbow_wave, 12] <= P[c:-4%] (0,0,255), P[c:22%](0,255,0), P[c:47%](255,255,0), P[c:72%](255,255,255), P[c:97%](127,0,255), P[c:122%](0,0,255);
A[rainbow_wave, 13] <= P[c:-2%] (0,0,255), P[c:24%](0,255,0), P[c:49%](255,255,0), P[c:74%](255,255,255), P[c:99%](127,0,255), P[c:124%](0,0,255);
A[rainbow_wave, 14] <= P[c:0%] (0,0,255), P[c:25%](0,255,0), P[c:50%](255,255,0), P[c:75%](255,255,255), P[c:100%](127,0,255);
A[rainbow_wave, 15] <= P[c:-24%](127,0,255), P[c:2%] (0,0,255), P[c:27%](0,255,0), P[c:52%](255,255,0), P[c:77%](255,255,255), P[c:102%](127,0,255);
A[rainbow_wave, 16] <= P[c:-22%](127,0,255), P[c:4%] (0,0,255), P[c:29%](0,255,0), P[c:54%](255,255,0), P[c:79%](255,255,255), P[c:104%](127,0,255);
A[rainbow_wave, 17] <= P[c:-20%](127,0,255), P[c:6%] (0,0,255), P[c:31%](0,255,0), P[c:56%](255,255,0), P[c:81%](255,255,255), P[c:106%](127,0,255);
A[rainbow_wave, 18] <= P[c:-18%](127,0,255), P[c:8%] (0,0,255), P[c:33%](0,255,0), P[c:58%](255,255,0), P[c:83%](255,255,255), P[c:108%](127,0,255);
A[rainbow_wave, 19] <= P[c:-16%](127,0,255), P[c:10%](0,0,255), P[c:35%](0,255,0), P[c:60%](255,255,0), P[c:85%](255,255,255), P[c:110%](127,0,255);
A[rainbow_wave, 20] <= P[c:-14%](127,0,255), P[c:12%](0,0,255), P[c:37%](0,255,0), P[c:62%](255,255,0), P[c:87%](255,255,255), P[c:112%](127,0,255);
A[rainbow_wave, 21] <= P[c:-12%](127,0,255), P[c:14%](0,0,255), P[c:39%](0,255,0), P[c:64%](255,255,0), P[c:89%](255,255,255), P[c:114%](127,0,255);
A[rainbow_wave, 22] <= P[c:-10%](127,0,255), P[c:16%](0,0,255), P[c:41%](0,255,0), P[c:66%](255,255,0), P[c:91%](255,255,255), P[c:116%](127,0,255);
A[rainbow_wave, 23] <= P[c:-8%] (127,0,255), P[c:18%](0,0,255), P[c:43%](0,255,0), P[c:68%](255,255,0), P[c:93%](255,255,255), P[c:118%](127,0,255);
A[rainbow_wave, 24] <= P[c:-6%] (127,0,255), P[c:20%](0,0,255), P[c:45%](0,255,0), P[c:70%](255,255,0), P[c:95%](255,255,255), P[c:120%](127,0,255);
A[rainbow_wave, 25] <= P[c:-4%] (127,0,255), P[c:22%](0,0,255), P[c:47%](0,255,0), P[c:72%](255,255,0), P[c:97%](255,255,255), P[c:122%](127,0,255);
A[rainbow_wave, 26] <= P[c:-2%] (127,0,255), P[c:24%](0,0,255), P[c:49%](0,255,0), P[c:74%](255,255,0), P[c:99%](255,255,255), P[c:124%](127,0,255);
A[rainbow_wave, 28] <= P[c:0%] (127,0,255), P[c:25%](0,0,255), P[c:50%](0,255,0), P[c:75%](255,255,0), P[c:100%](255,255,255);
A[rainbow_wave, 29] <= P[c:-24%](255,255,255), P[c:2%] (127,0,255), P[c:27%](0,0,255), P[c:52%](0,255,0), P[c:77%](255,255,0), P[c:102%](255,255,255);
A[rainbow_wave, 30] <= P[c:-22%](255,255,255), P[c:4%] (127,0,255), P[c:29%](0,0,255), P[c:54%](0,255,0), P[c:79%](255,255,0), P[c:104%](255,255,255);
A[rainbow_wave, 31] <= P[c:-20%](255,255,255), P[c:6%] (127,0,255), P[c:31%](0,0,255), P[c:56%](0,255,0), P[c:81%](255,255,0), P[c:106%](255,255,255);
A[rainbow_wave, 32] <= P[c:-18%](255,255,255), P[c:8%] (127,0,255), P[c:33%](0,0,255), P[c:58%](0,255,0), P[c:83%](255,255,0), P[c:108%](255,255,255);
A[rainbow_wave, 33] <= P[c:-16%](255,255,255), P[c:10%](127,0,255), P[c:35%](0,0,255), P[c:60%](0,255,0), P[c:85%](255,255,0), P[c:110%](255,255,255);
A[rainbow_wave, 34] <= P[c:-14%](255,255,255), P[c:12%](127,0,255), P[c:37%](0,0,255), P[c:62%](0,255,0), P[c:87%](255,255,0), P[c:112%](255,255,255);
A[rainbow_wave, 35] <= P[c:-12%](255,255,255), P[c:14%](127,0,255), P[c:39%](0,0,255), P[c:64%](0,255,0), P[c:89%](255,255,0), P[c:114%](255,255,255);
A[rainbow_wave, 36] <= P[c:-10%](255,255,255), P[c:16%](127,0,255), P[c:41%](0,0,255), P[c:66%](0,255,0), P[c:91%](255,255,0), P[c:116%](255,255,255);
A[rainbow_wave, 37] <= P[c:-8%] (255,255,255), P[c:18%](127,0,255), P[c:43%](0,0,255), P[c:68%](0,255,0), P[c:93%](255,255,0), P[c:118%](255,255,255);
A[rainbow_wave, 38] <= P[c:-6%] (255,255,255), P[c:20%](127,0,255), P[c:45%](0,0,255), P[c:70%](0,255,0), P[c:95%](255,255,0), P[c:120%](255,255,255);
A[rainbow_wave, 39] <= P[c:-4%] (255,255,255), P[c:22%](127,0,255), P[c:47%](0,0,255), P[c:72%](0,255,0), P[c:97%](255,255,0), P[c:122%](255,255,255);
A[rainbow_wave, 40] <= P[c:-2%] (255,255,255), P[c:24%](127,0,255), P[c:49%](0,0,255), P[c:74%](0,255,0), P[c:99%](255,255,0), P[c:124%](255,255,255);
A[rainbow_wave, 41] <= P[c:0%] (255,255,255), P[c:25%](127,0,255), P[c:50%](0,0,255), P[c:75%](0,255,0), P[c:100%](255,255,0);
A[rainbow_wave, 42] <= P[c:-24%](255,255,0), P[c:2%] (255,255,255), P[c:27%](127,0,255), P[c:52%](0,0,255), P[c:77%](0,255,0), P[c:102%](255,255,0);
A[rainbow_wave, 43] <= P[c:-22%](255,255,0), P[c:4%] (255,255,255), P[c:29%](127,0,255), P[c:54%](0,0,255), P[c:79%](0,255,0), P[c:104%](255,255,0);
A[rainbow_wave, 44] <= P[c:-20%](255,255,0), P[c:6%] (255,255,255), P[c:31%](127,0,255), P[c:56%](0,0,255), P[c:81%](0,255,0), P[c:106%](255,255,0);
A[rainbow_wave, 45] <= P[c:-18%](255,255,0), P[c:8%] (255,255,255), P[c:33%](127,0,255), P[c:58%](0,0,255), P[c:83%](0,255,0), P[c:108%](255,255,0);
A[rainbow_wave, 46] <= P[c:-16%](255,255,0), P[c:10%](255,255,255), P[c:35%](127,0,255), P[c:60%](0,0,255), P[c:85%](0,255,0), P[c:110%](255,255,0);
A[rainbow_wave, 47] <= P[c:-14%](255,255,0), P[c:12%](255,255,255), P[c:37%](127,0,255), P[c:62%](0,0,255), P[c:87%](0,255,0), P[c:112%](255,255,0);
A[rainbow_wave, 48] <= P[c:-12%](255,255,0), P[c:14%](255,255,255), P[c:39%](127,0,255), P[c:64%](0,0,255), P[c:89%](0,255,0), P[c:114%](255,255,0);
A[rainbow_wave, 49] <= P[c:-10%](255,255,0), P[c:16%](255,255,255), P[c:41%](127,0,255), P[c:66%](0,0,255), P[c:91%](0,255,0), P[c:116%](255,255,0);
A[rainbow_wave, 50] <= P[c:-8%] (255,255,0), P[c:18%](255,255,255), P[c:43%](127,0,255), P[c:68%](0,0,255), P[c:93%](0,255,0), P[c:118%](255,255,0);
A[rainbow_wave, 51] <= P[c:-6%] (255,255,0), P[c:20%](255,255,255), P[c:45%](127,0,255), P[c:70%](0,0,255), P[c:95%](0,255,0), P[c:120%](255,255,0);
A[rainbow_wave, 52] <= P[c:-4%] (255,255,0), P[c:22%](255,255,255), P[c:47%](127,0,255), P[c:72%](0,0,255), P[c:97%](0,255,0), P[c:122%](255,255,0);
A[rainbow_wave, 53] <= P[c:-2%] (255,255,0), P[c:24%](255,255,255), P[c:49%](127,0,255), P[c:74%](0,0,255), P[c:99%](0,255,0), P[c:124%](255,255,0);
A[rainbow_wave, 54] <= P[c:0%] (255,255,0), P[c:25%](255,255,255), P[c:50%](127,0,255), P[c:75%](0,0,255), P[c:100%](0,255,0);
A[rainbow_wave, 55] <= P[c:-24%](0,255,0), P[c:2%] (255,255,0), P[c:27%](255,255,255), P[c:52%](127,0,255), P[c:77%](0,0,255), P[c:102%](0,255,0);
A[rainbow_wave, 56] <= P[c:-22%](0,255,0), P[c:4%] (255,255,0), P[c:29%](255,255,255), P[c:54%](127,0,255), P[c:79%](0,0,255), P[c:104%](0,255,0);
A[rainbow_wave, 57] <= P[c:-20%](0,255,0), P[c:6%] (255,255,0), P[c:31%](255,255,255), P[c:56%](127,0,255), P[c:81%](0,0,255), P[c:106%](0,255,0);
A[rainbow_wave, 58] <= P[c:-18%](0,255,0), P[c:8%] (255,255,0), P[c:33%](255,255,255), P[c:58%](127,0,255), P[c:83%](0,0,255), P[c:108%](0,255,0);
A[rainbow_wave, 59] <= P[c:-16%](0,255,0), P[c:10%](255,255,0), P[c:35%](255,255,255), P[c:60%](127,0,255), P[c:85%](0,0,255), P[c:110%](0,255,0);
A[rainbow_wave, 60] <= P[c:-14%](0,255,0), P[c:12%](255,255,0), P[c:37%](255,255,255), P[c:62%](127,0,255), P[c:87%](0,0,255), P[c:112%](0,255,0);
A[rainbow_wave, 61] <= P[c:-12%](0,255,0), P[c:14%](255,255,0), P[c:39%](255,255,255), P[c:64%](127,0,255), P[c:89%](0,0,255), P[c:114%](0,255,0);
A[rainbow_wave, 62] <= P[c:-10%](0,255,0), P[c:16%](255,255,0), P[c:41%](255,255,255), P[c:66%](127,0,255), P[c:91%](0,0,255), P[c:116%](0,255,0);
A[rainbow_wave, 63] <= P[c:-8%] (0,255,0), P[c:18%](255,255,0), P[c:43%](255,255,255), P[c:68%](127,0,255), P[c:93%](0,0,255), P[c:118%](0,255,0);
A[rainbow_wave, 64] <= P[c:-6%] (0,255,0), P[c:20%](255,255,0), P[c:45%](255,255,255), P[c:70%](127,0,255), P[c:95%](0,0,255), P[c:120%](0,255,0);
A[rainbow_wave, 65] <= P[c:-4%] (0,255,0), P[c:22%](255,255,0), P[c:47%](255,255,255), P[c:72%](127,0,255), P[c:97%](0,0,255), P[c:122%](0,255,0);
A[rainbow_wave, 66] <= P[c:-2%] (0,255,0), P[c:24%](255,255,0), P[c:49%](255,255,255), P[c:74%](127,0,255), P[c:99%](0,0,255), P[c:124%](0,255,0);
# Start/stop wave
U["Print"] :+ A[rainbow_wave](start);
U["Pause"] :+ A[rainbow_wave](start, loops:1, framedelay:0);
# Keys only color rotation
A[keys_only_rotation] <= framedelay:0xF, pfunc:interp;
A[keys_only_rotation, 1] <= S0x01(0xC1, 0x2D, 0x1D), S0x5F(0xC1, 0x2D, 0x1D);
A[keys_only_rotation, 2] <= S0x01(0xEE, 0x6D, 0x28), S0x5F(0xEE, 0x6D, 0x28);
A[keys_only_rotation, 3] <= S0x01(0xE0, 0x9E, 0x3B), S0x5F(0xE0, 0x9E, 0x3B);
A[keys_only_rotation, 4] <= S0x01(0xE5, 0xC9, 0x43), S0x5F(0xE5, 0xC9, 0x43);
A[keys_only_rotation, 5] <= S0x01(0x1E, 0xB8, 0x6D), S0x5F(0x1E, 0xB8, 0x6D);
A[keys_only_rotation, 6] <= S0x01(0x00, 0xB3, 0xA6), S0x5F(0x00, 0xB3, 0xA6);
A[keys_only_rotation, 7] <= S0x01(0x20, 0x82, 0xC6), S0x5F(0x20, 0x82, 0xC6);
A[keys_only_rotation, 8] <= S0x01(0x43, 0x54, 0xC1), S0x5F(0x43, 0x54, 0xC1);
A[keys_only_rotation, 9] <= S0x01(0x71, 0x1C, 0x9E), S0x5F(0x71, 0x1C, 0x9E);
A[keys_only_rotation, 10] <= S0x01(0xCD, 0x3B, 0x70), S0x5F(0xCD, 0x3B, 0x70);
A[keys_only_rotation, 11] <= S0x01(0xB8, 0x34, 0x3E), S0x5F(0xB8, 0x34, 0x3E);
# Underlighting only color rotation
A[underlighting_only_rotation] <= framedelay:0xF, pfunc:interp;
A[underlighting_only_rotation, 1] <= P[88](0xC1, 0x2D, 0x1D), P[128](0xC1, 0x2D, 0x1D);
A[underlighting_only_rotation, 2] <= P[88](0xEE, 0x6D, 0x28), P[128](0xEE, 0x6D, 0x28);
A[underlighting_only_rotation, 3] <= P[88](0xE0, 0x9E, 0x3B), P[128](0xE0, 0x9E, 0x3B);
A[underlighting_only_rotation, 4] <= P[88](0xE5, 0xC9, 0x43), P[128](0xE5, 0xC9, 0x43);
A[underlighting_only_rotation, 5] <= P[88](0x1E, 0xB8, 0x6D), P[128](0x1E, 0xB8, 0x6D);
A[underlighting_only_rotation, 6] <= P[88](0x00, 0xB3, 0xA6), P[128](0x00, 0xB3, 0xA6);
A[underlighting_only_rotation, 7] <= P[88](0x20, 0x82, 0xC6), P[128](0x20, 0x82, 0xC6);
A[underlighting_only_rotation, 8] <= P[88](0x43, 0x54, 0xC1), P[128](0x43, 0x54, 0xC1);
A[underlighting_only_rotation, 9] <= P[88](0x71, 0x1C, 0x9E), P[128](0x71, 0x1C, 0x9E);
A[underlighting_only_rotation, 10] <= P[88](0xCD, 0x3B, 0x70), P[128](0xCD, 0x3B, 0x70);
A[underlighting_only_rotation, 11] <= P[88](0xB8, 0x34, 0x3E), P[128](0xB8, 0x34, 0x3E);
U"F1" +: A[keys_only_rotation](start, framedelay:0x1F);
U"F2" +: A[keys_only_rotation](start);
U"F3" +: A[keys_only_rotation](start, framedelay:1);
U"F4" +: A[keys_only_rotation](pause);
U"F5" +: A[underlighting_only_rotation](start, framedelay:0x1F);
U"F6" +: A[underlighting_only_rotation](start);
U"F7" +: A[underlighting_only_rotation](start, framedelay:1);
U"F8" +: A[underlighting_only_rotation](pause);
# TODO
# Esc, reset animations
U"Esc" +: animation_control(4);
# Rainbow Animation example
A[rainbow_fill_interp] <= loop, replace:basic, pfunc:interp;
A[rainbow_fill_interp, 1] <= P[c:0%](0,255,0), P[c:25%](255,255,0), P[c:50%](255,0,0), P[c:75%](127,0,255), P[c:100%](0,0,255);
#A[rainbow_fill_interp, 1] <= P[c:0%](0,255,0), P[c:25%](255,255,0), P[c:50%](255,0,0), P[c:75%](127,0,255);
A[rainbow_fill_interp, 2] <= P[c:0%](0,255,0), P[c:27%](255,255,0), P[c:52%](255,0,0), P[c:77%](127,0,255), P[c:100%](0,0,255);
#A[rainbow_fill_interp, 2] <= P[c:0%](0,255,0), P[c:27%](255,255,0), P[c:52%](255,0,0), P[c:77%](127,0,255);
A[rainbow_fill_interp, 3] <= P[c:0%](0,255,0), P[c:29%](255,255,0), P[c:54%](255,0,0), P[c:79%](127,0,255), P[c:100%](0,0,255);
#A[rainbow_fill_interp, 3] <= P[c:0%](0,255,0), P[c:29%](255,255,0), P[c:54%](255,0,0), P[c:79%](127,0,255);
A[rainbow_fill_interp, 4] <= P[c:0%](0,255,0), P[c:31%](255,255,0), P[c:56%](255,0,0), P[c:81%](127,0,255), P[c:100%](0,0,255);
#A[rainbow_fill_interp, 4] <= P[c:0%](0,255,0), P[c:31%](255,255,0), P[c:56%](255,0,0), P[c:81%](127,0,255);
#A[rainbow_fill_interp, 4] <= P[c:56%](255,0,0), P[c:81%](127,0,255), P[c:100%](0,0,255);
A[rainbow_fill_interp, 5] <= P[c:0%](0,255,0), P[c:33%](255,255,0), P[c:58%](255,0,0), P[c:83%](127,0,255), P[c:100%](0,0,255);
A[rainbow_fill_interp, 6] <= P[c:0%](0,255,0), P[c:35%](255,255,0), P[c:60%](255,0,0), P[c:85%](127,0,255), P[c:100%](0,0,255);
A[rainbow_fill_interp, 7] <= P[c:0%](0,255,0), P[c:37%](255,255,0), P[c:62%](255,0,0), P[c:87%](127,0,255), P[c:100%](0,0,255);
A[rainbow_fill_interp, 8] <= P[c:0%](0,255,0), P[c:39%](255,255,0), P[c:64%](255,0,0), P[c:89%](127,0,255), P[c:100%](0,0,255);
A[rainbow_fill_interp, 9] <= P[c:0%](0,255,0), P[c:41%](255,255,0), P[c:66%](255,0,0), P[c:91%](127,0,255), P[c:100%](0,0,255);
A[rainbow_fill_interp, 10] <= P[c:0%](0,255,0), P[c:43%](255,255,0), P[c:68%](255,0,0), P[c:93%](127,0,255), P[c:100%](0,0,255);
A[rainbow_fill_interp, 11] <= P[c:0%](0,255,0), P[c:45%](255,255,0), P[c:70%](255,0,0), P[c:95%](127,0,255), P[c:100%](0,0,255);
A[rainbow_fill_interp, 12] <= P[c:0%](0,255,0), P[c:47%](255,255,0), P[c:72%](255,0,0), P[c:97%](127,0,255), P[c:100%](0,0,255);
A[rainbow_fill_interp, 13] <= P[c:0%](0,255,0), P[c:45%](255,255,0), P[c:70%](255,0,0), P[c:95%](127,0,255), P[c:100%](0,0,255);
A[rainbow_fill_interp, 14] <= P[c:0%](0,255,0), P[c:43%](255,255,0), P[c:68%](255,0,0), P[c:93%](127,0,255), P[c:100%](0,0,255);
A[rainbow_fill_interp, 15] <= P[c:0%](0,255,0), P[c:41%](255,255,0), P[c:66%](255,0,0), P[c:91%](127,0,255), P[c:100%](0,0,255);
A[rainbow_fill_interp, 16] <= P[c:0%](0,255,0), P[c:39%](255,255,0), P[c:64%](255,0,0), P[c:89%](127,0,255), P[c:100%](0,0,255);
A[rainbow_fill_interp, 17] <= P[c:0%](0,255,0), P[c:37%](255,255,0), P[c:62%](255,0,0), P[c:87%](127,0,255), P[c:100%](0,0,255);
A[rainbow_fill_interp, 18] <= P[c:0%](0,255,0), P[c:35%](255,255,0), P[c:60%](255,0,0), P[c:85%](127,0,255), P[c:100%](0,0,255);
A[rainbow_fill_interp, 19] <= P[c:0%](0,255,0), P[c:33%](255,255,0), P[c:58%](255,0,0), P[c:83%](127,0,255), P[c:100%](0,0,255);
A[rainbow_fill_interp, 20] <= P[c:0%](0,255,0), P[c:31%](255,255,0), P[c:56%](255,0,0), P[c:81%](127,0,255), P[c:100%](0,0,255);
A[rainbow_fill_interp, 21] <= P[c:0%](0,255,0), P[c:29%](255,255,0), P[c:54%](255,0,0), P[c:79%](127,0,255), P[c:100%](0,0,255);
A[rainbow_fill_interp, 22] <= P[c:0%](0,255,0), P[c:27%](255,255,0), P[c:52%](255,0,0), P[c:77%](127,0,255), P[c:100%](0,0,255);
A[rainbow_fill_interp, 23] <= P[c:0%](0,255,0), P[c:25%](255,255,0), P[c:50%](255,0,0), P[c:75%](127,0,255), P[c:100%](0,0,255);
A[rainbow_fill_interp, 24] <= P[c:0%](0,255,0), P[c:23%](255,255,0), P[c:47%](255,0,0), P[c:73%](127,0,255), P[c:100%](0,0,255);
A[rainbow_fill_interp, 25] <= P[c:0%](0,255,0), P[c:21%](255,255,0), P[c:44%](255,0,0), P[c:71%](127,0,255), P[c:100%](0,0,255);
A[rainbow_fill_interp, 26] <= P[c:0%](0,255,0), P[c:19%](255,255,0), P[c:41%](255,0,0), P[c:69%](127,0,255), P[c:100%](0,0,255);
A[rainbow_fill_interp, 27] <= P[c:0%](0,255,0), P[c:17%](255,255,0), P[c:38%](255,0,0), P[c:67%](127,0,255), P[c:100%](0,0,255);
A[rainbow_fill_interp, 28] <= P[c:0%](0,255,0), P[c:15%](255,255,0), P[c:35%](255,0,0), P[c:65%](127,0,255), P[c:100%](0,0,255);
A[rainbow_fill_interp, 29] <= P[c:0%](0,255,0), P[c:13%](255,255,0), P[c:33%](255,0,0), P[c:63%](127,0,255), P[c:100%](0,0,255);
A[rainbow_fill_interp, 30] <= P[c:0%](0,255,0), P[c:11%](255,255,0), P[c:30%](255,0,0), P[c:61%](127,0,255), P[c:100%](0,0,255);
A[rainbow_fill_interp, 31] <= P[c:0%](0,255,0), P[c:9%](255,255,0), P[c:27%](255,0,0), P[c:59%](127,0,255), P[c:100%](0,0,255);
A[rainbow_fill_interp, 32] <= P[c:0%](0,255,0), P[c:7%](255,255,0), P[c:24%](255,0,0), P[c:57%](127,0,255), P[c:100%](0,0,255);
A[rainbow_fill_interp, 33] <= P[c:0%](0,255,0), P[c:5%](255,255,0), P[c:21%](255,0,0), P[c:55%](127,0,255), P[c:100%](0,0,255);
A[rainbow_fill_interp, 34] <= P[c:0%](0,255,0), P[c:3%](255,255,0), P[c:18%](255,0,0), P[c:53%](127,0,255), P[c:100%](0,0,255);
A[rainbow_fill_interp, 35] <= P[c:0%](0,255,0), P[c:3%](255,255,0), P[c:18%](255,0,0), P[c:53%](127,0,255), P[c:100%](0,0,255);
A[rainbow_fill_interp, 36] <= P[c:0%](0,255,0), P[c:3%](255,255,0), P[c:18%](255,0,0), P[c:53%](127,0,255), P[c:100%](0,0,255);
A[rainbow_fill_interp, 37] <= P[c:0%](0,255,0), P[c:10%](255,255,0), P[c:20%](255,0,0), P[c:60%](127,0,255), P[c:100%](0,0,255);
A[rainbow_fill_interp, 38] <= P[c:0%](0,255,0), P[c:20%](255,255,0), P[c:40%](255,0,0), P[c:70%](127,0,255), P[c:100%](0,0,255);
A[rainbow_fill_interp, 39] <= P[c:0%](0,255,0), P[c:25%](255,255,0), P[c:50%](255,0,0), P[c:75%](127,0,255), P[c:100%](0,0,255);
#A[rainbow_fill_interp, 40] <= P[c:0%](100,100,100), P[c:100%](100,100,100);
U["A"-"Z"] :+ A[rainbow_fill_interp](loops:1);
U["1"] :+ A[under_streak](framedelay:1, loops:1);
U["2"] :+ A[under_streak](framedelay:1, loops:2);
U["3"] :+ A[under_streak](framedelay:1, loops:3);
U["4"] :+ A[under_streak](framedelay:1, loops:4);
U["5"] :+ A[under_streak](framedelay:1, loops:5);
U["6"] :+ A[under_streak](framedelay:1, loops:6);
U["7"] :+ A[under_streak](framedelay:1, loops:7);
U["8"] :+ A[under_streak](framedelay:1, loops:8);
U["9"] :+ A[under_streak](framedelay:1, loops:9);
U["0"] :+ A[under_streak](loops:1);
# File: kll/layouts/kira/demo.1.kll
# Animation Example Configuration
Name = "Demo 1";
Version = 1.0;
Author = "HaaTa (Jacob Alexander) 2017";
KLL = 0.5;
# Modified Date
Date = 2017-11-11;
### Animations ###
A[leds_ind] <= start, framedelay:10, framestretch, loop;
A[leds_ind, 1] <= P[126](127,0,55), P[127](127,200,55), P[128](127,0,55);
A[leds_ind, 2] <= P[126](127,0,25), P[127](127,200,25), P[128](180,0,25);
A[leds_ind, 3] <= P[126](127,0,0), P[127](127,200,0), P[128](227,0,0);
A[leds_ind, 4] <= P[126](127,0,0), P[127](127,200,0), P[128](167,0,0);
A[leds_ind, 5] <= P[126](100,0,0), P[127](100,200,0), P[128](100,0,0);
A[leds_ind, 6] <= P[126](70,0,0), P[127](70,200,0), P[128](70,0,0);
A[leds_ind, 7] <= P[126](40,0,0), P[127](40,200,0), P[128](40,0,0);
A[leds_ind, 8] <= P[126](20,0,0), P[127](20,200,0), P[128](20,0,0);
A[leds_ind, 9] <= P[126](0,0,0), P[127](0,200,0), P[128](0,0,0);
A[leds_ind, 10]<= P[126](50,0,30), P[127](50,200,30), P[128](50,0,30);
# Rainbow wave
A[rainbow_wave] <= start, framedelay:3, framestretch, loop, replace:all, pfunc:interp;
A[rainbow_wave, 1] <= P[c:0%] (0,255,0), P[c:25%](255,255,0), P[c:50%](255,255,255), P[c:75%](127,0,255), P[c:100%](0,0,255);
A[rainbow_wave, 2] <= P[c:-24%](0,0,255), P[c:2%] (0,255,0), P[c:27%](255,255,0), P[c:52%](255,255,255), P[c:77%](127,0,255), P[c:102%](0,0,255);
A[rainbow_wave, 3] <= P[c:-22%](0,0,255), P[c:4%] (0,255,0), P[c:29%](255,255,0), P[c:54%](255,255,255), P[c:79%](127,0,255), P[c:104%](0,0,255);
A[rainbow_wave, 4] <= P[c:-20%](0,0,255), P[c:6%] (0,255,0), P[c:31%](255,255,0), P[c:56%](255,255,255), P[c:81%](127,0,255), P[c:106%](0,0,255);
A[rainbow_wave, 5] <= P[c:-18%](0,0,255), P[c:8%] (0,255,0), P[c:33%](255,255,0), P[c:58%](255,255,255), P[c:83%](127,0,255), P[c:108%](0,0,255);
A[rainbow_wave, 6] <= P[c:-16%](0,0,255), P[c:10%](0,255,0), P[c:35%](255,255,0), P[c:60%](255,255,255), P[c:85%](127,0,255), P[c:110%](0,0,255);
A[rainbow_wave, 7] <= P[c:-14%](0,0,255), P[c:12%](0,255,0), P[c:37%](255,255,0), P[c:62%](255,255,255), P[c:87%](127,0,255), P[c:112%](0,0,255);
A[rainbow_wave, 8] <= P[c:-12%](0,0,255), P[c:14%](0,255,0), P[c:39%](255,255,0), P[c:64%](255,255,255), P[c:89%](127,0,255), P[c:114%](0,0,255);
A[rainbow_wave, 9] <= P[c:-10%](0,0,255), P[c:16%](0,255,0), P[c:41%](255,255,0), P[c:66%](255,255,255), P[c:91%](127,0,255), P[c:116%](0,0,255);
A[rainbow_wave, 10] <= P[c:-8%] (0,0,255), P[c:18%](0,255,0), P[c:43%](255,255,0), P[c:68%](255,255,255), P[c:93%](127,0,255), P[c:118%](0,0,255);
A[rainbow_wave, 11] <= P[c:-6%] (0,0,255), P[c:20%](0,255,0), P[c:45%](255,255,0), P[c:70%](255,255,255), P[c:95%](127,0,255), P[c:120%](0,0,255);
A[rainbow_wave, 12] <= P[c:-4%] (0,0,255), P[c:22%](0,255,0), P[c:47%](255,255,0), P[c:72%](255,255,255), P[c:97%](127,0,255), P[c:122%](0,0,255);
A[rainbow_wave, 13] <= P[c:-2%] (0,0,255), P[c:24%](0,255,0), P[c:49%](255,255,0), P[c:74%](255,255,255), P[c:99%](127,0,255), P[c:124%](0,0,255);
A[rainbow_wave, 14] <= P[c:0%] (0,0,255), P[c:25%](0,255,0), P[c:50%](255,255,0), P[c:75%](255,255,255), P[c:100%](127,0,255);
A[rainbow_wave, 15] <= P[c:-24%](127,0,255), P[c:2%] (0,0,255), P[c:27%](0,255,0), P[c:52%](255,255,0), P[c:77%](255,255,255), P[c:102%](127,0,255);
A[rainbow_wave, 16] <= P[c:-22%](127,0,255), P[c:4%] (0,0,255), P[c:29%](0,255,0), P[c:54%](255,255,0), P[c:79%](255,255,255), P[c:104%](127,0,255);
A[rainbow_wave, 17] <= P[c:-20%](127,0,255), P[c:6%] (0,0,255), P[c:31%](0,255,0), P[c:56%](255,255,0), P[c:81%](255,255,255), P[c:106%](127,0,255);
A[rainbow_wave, 18] <= P[c:-18%](127,0,255), P[c:8%] (0,0,255), P[c:33%](0,255,0), P[c:58%](255,255,0), P[c:83%](255,255,255), P[c:108%](127,0,255);
A[rainbow_wave, 19] <= P[c:-16%](127,0,255), P[c:10%](0,0,255), P[c:35%](0,255,0), P[c:60%](255,255,0), P[c:85%](255,255,255), P[c:110%](127,0,255);
A[rainbow_wave, 20] <= P[c:-14%](127,0,255), P[c:12%](0,0,255), P[c:37%](0,255,0), P[c:62%](255,255,0), P[c:87%](255,255,255), P[c:112%](127,0,255);
A[rainbow_wave, 21] <= P[c:-12%](127,0,255), P[c:14%](0,0,255), P[c:39%](0,255,0), P[c:64%](255,255,0), P[c:89%](255,255,255), P[c:114%](127,0,255);
A[rainbow_wave, 22] <= P[c:-10%](127,0,255), P[c:16%](0,0,255), P[c:41%](0,255,0), P[c:66%](255,255,0), P[c:91%](255,255,255), P[c:116%](127,0,255);
A[rainbow_wave, 23] <= P[c:-8%] (127,0,255), P[c:18%](0,0,255), P[c:43%](0,255,0), P[c:68%](255,255,0), P[c:93%](255,255,255), P[c:118%](127,0,255);
A[rainbow_wave, 24] <= P[c:-6%] (127,0,255), P[c:20%](0,0,255), P[c:45%](0,255,0), P[c:70%](255,255,0), P[c:95%](255,255,255), P[c:120%](127,0,255);
A[rainbow_wave, 25] <= P[c:-4%] (127,0,255), P[c:22%](0,0,255), P[c:47%](0,255,0), P[c:72%](255,255,0), P[c:97%](255,255,255), P[c:122%](127,0,255);
A[rainbow_wave, 26] <= P[c:-2%] (127,0,255), P[c:24%](0,0,255), P[c:49%](0,255,0), P[c:74%](255,255,0), P[c:99%](255,255,255), P[c:124%](127,0,255);
A[rainbow_wave, 28] <= P[c:0%] (127,0,255), P[c:25%](0,0,255), P[c:50%](0,255,0), P[c:75%](255,255,0), P[c:100%](255,255,255);
A[rainbow_wave, 29] <= P[c:-24%](255,255,255), P[c:2%] (127,0,255), P[c:27%](0,0,255), P[c:52%](0,255,0), P[c:77%](255,255,0), P[c:102%](255,255,255);
A[rainbow_wave, 30] <= P[c:-22%](255,255,255), P[c:4%] (127,0,255), P[c:29%](0,0,255), P[c:54%](0,255,0), P[c:79%](255,255,0), P[c:104%](255,255,255);
A[rainbow_wave, 31] <= P[c:-20%](255,255,255), P[c:6%] (127,0,255), P[c:31%](0,0,255), P[c:56%](0,255,0), P[c:81%](255,255,0), P[c:106%](255,255,255);
A[rainbow_wave, 32] <= P[c:-18%](255,255,255), P[c:8%] (127,0,255), P[c:33%](0,0,255), P[c:58%](0,255,0), P[c:83%](255,255,0), P[c:108%](255,255,255);
A[rainbow_wave, 33] <= P[c:-16%](255,255,255), P[c:10%](127,0,255), P[c:35%](0,0,255), P[c:60%](0,255,0), P[c:85%](255,255,0), P[c:110%](255,255,255);
A[rainbow_wave, 34] <= P[c:-14%](255,255,255), P[c:12%](127,0,255), P[c:37%](0,0,255), P[c:62%](0,255,0), P[c:87%](255,255,0), P[c:112%](255,255,255);
A[rainbow_wave, 35] <= P[c:-12%](255,255,255), P[c:14%](127,0,255), P[c:39%](0,0,255), P[c:64%](0,255,0), P[c:89%](255,255,0), P[c:114%](255,255,255);
A[rainbow_wave, 36] <= P[c:-10%](255,255,255), P[c:16%](127,0,255), P[c:41%](0,0,255), P[c:66%](0,255,0), P[c:91%](255,255,0), P[c:116%](255,255,255);
A[rainbow_wave, 37] <= P[c:-8%] (255,255,255), P[c:18%](127,0,255), P[c:43%](0,0,255), P[c:68%](0,255,0), P[c:93%](255,255,0), P[c:118%](255,255,255);
A[rainbow_wave, 38] <= P[c:-6%] (255,255,255), P[c:20%](127,0,255), P[c:45%](0,0,255), P[c:70%](0,255,0), P[c:95%](255,255,0), P[c:120%](255,255,255);
A[rainbow_wave, 39] <= P[c:-4%] (255,255,255), P[c:22%](127,0,255), P[c:47%](0,0,255), P[c:72%](0,255,0), P[c:97%](255,255,0), P[c:122%](255,255,255);
A[rainbow_wave, 40] <= P[c:-2%] (255,255,255), P[c:24%](127,0,255), P[c:49%](0,0,255), P[c:74%](0,255,0), P[c:99%](255,255,0), P[c:124%](255,255,255);
A[rainbow_wave, 41] <= P[c:0%] (255,255,255), P[c:25%](127,0,255), P[c:50%](0,0,255), P[c:75%](0,255,0), P[c:100%](255,255,0);
A[rainbow_wave, 42] <= P[c:-24%](255,255,0), P[c:2%] (255,255,255), P[c:27%](127,0,255), P[c:52%](0,0,255), P[c:77%](0,255,0), P[c:102%](255,255,0);
A[rainbow_wave, 43] <= P[c:-22%](255,255,0), P[c:4%] (255,255,255), P[c:29%](127,0,255), P[c:54%](0,0,255), P[c:79%](0,255,0), P[c:104%](255,255,0);
A[rainbow_wave, 44] <= P[c:-20%](255,255,0), P[c:6%] (255,255,255), P[c:31%](127,0,255), P[c:56%](0,0,255), P[c:81%](0,255,0), P[c:106%](255,255,0);
A[rainbow_wave, 45] <= P[c:-18%](255,255,0), P[c:8%] (255,255,255), P[c:33%](127,0,255), P[c:58%](0,0,255), P[c:83%](0,255,0), P[c:108%](255,255,0);
A[rainbow_wave, 46] <= P[c:-16%](255,255,0), P[c:10%](255,255,255), P[c:35%](127,0,255), P[c:60%](0,0,255), P[c:85%](0,255,0), P[c:110%](255,255,0);
A[rainbow_wave, 47] <= P[c:-14%](255,255,0), P[c:12%](255,255,255), P[c:37%](127,0,255), P[c:62%](0,0,255), P[c:87%](0,255,0), P[c:112%](255,255,0);
A[rainbow_wave, 48] <= P[c:-12%](255,255,0), P[c:14%](255,255,255), P[c:39%](127,0,255), P[c:64%](0,0,255), P[c:89%](0,255,0), P[c:114%](255,255,0);
A[rainbow_wave, 49] <= P[c:-10%](255,255,0), P[c:16%](255,255,255), P[c:41%](127,0,255), P[c:66%](0,0,255), P[c:91%](0,255,0), P[c:116%](255,255,0);
A[rainbow_wave, 50] <= P[c:-8%] (255,255,0), P[c:18%](255,255,255), P[c:43%](127,0,255), P[c:68%](0,0,255), P[c:93%](0,255,0), P[c:118%](255,255,0);
A[rainbow_wave, 51] <= P[c:-6%] (255,255,0), P[c:20%](255,255,255), P[c:45%](127,0,255), P[c:70%](0,0,255), P[c:95%](0,255,0), P[c:120%](255,255,0);
A[rainbow_wave, 52] <= P[c:-4%] (255,255,0), P[c:22%](255,255,255), P[c:47%](127,0,255), P[c:72%](0,0,255), P[c:97%](0,255,0), P[c:122%](255,255,0);
A[rainbow_wave, 53] <= P[c:-2%] (255,255,0), P[c:24%](255,255,255), P[c:49%](127,0,255), P[c:74%](0,0,255), P[c:99%](0,255,0), P[c:124%](255,255,0);
A[rainbow_wave, 54] <= P[c:0%] (255,255,0), P[c:25%](255,255,255), P[c:50%](127,0,255), P[c:75%](0,0,255), P[c:100%](0,255,0);
A[rainbow_wave, 55] <= P[c:-24%](0,255,0), P[c:2%] (255,255,0), P[c:27%](255,255,255), P[c:52%](127,0,255), P[c:77%](0,0,255), P[c:102%](0,255,0);
A[rainbow_wave, 56] <= P[c:-22%](0,255,0), P[c:4%] (255,255,0), P[c:29%](255,255,255), P[c:54%](127,0,255), P[c:79%](0,0,255), P[c:104%](0,255,0);
A[rainbow_wave, 57] <= P[c:-20%](0,255,0), P[c:6%] (255,255,0), P[c:31%](255,255,255), P[c:56%](127,0,255), P[c:81%](0,0,255), P[c:106%](0,255,0);
A[rainbow_wave, 58] <= P[c:-18%](0,255,0), P[c:8%] (255,255,0), P[c:33%](255,255,255), P[c:58%](127,0,255), P[c:83%](0,0,255), P[c:108%](0,255,0);
A[rainbow_wave, 59] <= P[c:-16%](0,255,0), P[c:10%](255,255,0), P[c:35%](255,255,255), P[c:60%](127,0,255), P[c:85%](0,0,255), P[c:110%](0,255,0);
A[rainbow_wave, 60] <= P[c:-14%](0,255,0), P[c:12%](255,255,0), P[c:37%](255,255,255), P[c:62%](127,0,255), P[c:87%](0,0,255), P[c:112%](0,255,0);
A[rainbow_wave, 61] <= P[c:-12%](0,255,0), P[c:14%](255,255,0), P[c:39%](255,255,255), P[c:64%](127,0,255), P[c:89%](0,0,255), P[c:114%](0,255,0);
A[rainbow_wave, 62] <= P[c:-10%](0,255,0), P[c:16%](255,255,0), P[c:41%](255,255,255), P[c:66%](127,0,255), P[c:91%](0,0,255), P[c:116%](0,255,0);
A[rainbow_wave, 63] <= P[c:-8%] (0,255,0), P[c:18%](255,255,0), P[c:43%](255,255,255), P[c:68%](127,0,255), P[c:93%](0,0,255), P[c:118%](0,255,0);
A[rainbow_wave, 64] <= P[c:-6%] (0,255,0), P[c:20%](255,255,0), P[c:45%](255,255,255), P[c:70%](127,0,255), P[c:95%](0,0,255), P[c:120%](0,255,0);
A[rainbow_wave, 65] <= P[c:-4%] (0,255,0), P[c:22%](255,255,0), P[c:47%](255,255,255), P[c:72%](127,0,255), P[c:97%](0,0,255), P[c:122%](0,255,0);
A[rainbow_wave, 66] <= P[c:-2%] (0,255,0), P[c:24%](255,255,0), P[c:49%](255,255,255), P[c:74%](127,0,255), P[c:99%](0,0,255), P[c:124%](0,255,0);
# File: kll/layouts/kira/demo.2.kll
# Animation Example Configuration
Name = "Demo 2";
Version = 1.0;
Author = "HaaTa (Jacob Alexander) 2018";
KLL = 0.5;
# Modified Date
Date = 2018-02-16;
### Animations ###
# Indicator LEDs
A[leds_ind] <= replace:all;
A[leds_ind, 1] <= P[118](0x30,0x4F,0xFE), P[119](0x00,0xC8,53), P[120](0xDD,0x2C,0x00);
# Blue bottom
A[blue_foot] <= replace:all;
A[blue_foot, 1] <=
P[ 2](0x30,0x4F,0xFE),
P[ 4](0x30,0x4F,0xFE),
P[ 6](0x30,0x4F,0xFE),
P[ 8](0x30,0x4F,0xFE),
P[10](0x30,0x4F,0xFE),
P[12](0x30,0x4F,0xFE),
P[14](0x30,0x4F,0xFE),
P[16](0x30,0x4F,0xFE),
P[18](0x30,0x4F,0xFE),
P[20](0x30,0x4F,0xFE),
P[22](0x30,0x4F,0xFE),
P[24](0x30,0x4F,0xFE),
P[26](0x30,0x4F,0xFE),
P[28](0x30,0x4F,0xFE),
P[30](0x30,0x4F,0xFE),
P[32](0x30,0x4F,0xFE),
P[34](0x30,0x4F,0xFE),
P[36](0x30,0x4F,0xFE);
ISSI_FrameRate_ms = 30; # 1000 / 30 ≈ 33 fps
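# Note: the ISSI frame period is given in milliseconds, so the effective refresh
# rate is roughly 1000 / ISSI_FrameRate_ms frames per second; 30 ms yields ~33 fps,
# and a value of 10 would be needed for the 100 fps the original comment mentioned.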
# Global Brightness Control
U"F1" : ledControl(2, 0);
U"F2" : ledControl(2, 25);
U"F3" : ledControl(2, 50);
U"F4" : ledControl(2, 75);
U"F5" : ledControl(2, 100); # 40% Brightness
U"F6" : ledControl(2, 125);
U"F7" : ledControl(2, 150); # 60% Brightness
U"F8" : ledControl(2, 175);
U"F9" : ledControl(2, 200);
U"F10" : ledControl(2, 225);
U"F11" : ledControl(2, 255);
U"Esc" : A[rainbow_wave](start);
U"F12" : A[rainbow_wave](stop);
U"1" : A[blue_foot](start);
U"2" : A[leds_ind](start);
U"Delete" : animation_control(3);
# Off
#A[off] <= pfunc:interp, repla
#A[off, 1] <= P[c:0%](0,0,0), P[c:100%](0,0,0);
#A[off_led] <= replace:all;
#A[off_led, 1] <= P[118](0,0,0), P[119](0,0,0), P[120](0,0,0);
# Rainbow wave
A[rainbow_wave] <= start, framedelay:0, framestretch, loop, replace:all, pfunc:interp;
A[rainbow_wave, 1] <= P[c:0%] (0,255,0), P[c:25%](255,255,0), P[c:50%](255,255,255), P[c:75%](127,0,255), P[c:100%](0,0,255);
A[rainbow_wave, 2] <= P[c:-24%](0,0,255), P[c:2%] (0,255,0), P[c:27%](255,255,0), P[c:52%](255,255,255), P[c:77%](127,0,255), P[c:102%](0,0,255);
A[rainbow_wave, 3] <= P[c:-22%](0,0,255), P[c:4%] (0,255,0), P[c:29%](255,255,0), P[c:54%](255,255,255), P[c:79%](127,0,255), P[c:104%](0,0,255);
A[rainbow_wave, 4] <= P[c:-20%](0,0,255), P[c:6%] (0,255,0), P[c:31%](255,255,0), P[c:56%](255,255,255), P[c:81%](127,0,255), P[c:106%](0,0,255);
A[rainbow_wave, 5] <= P[c:-18%](0,0,255), P[c:8%] (0,255,0), P[c:33%](255,255,0), P[c:58%](255,255,255), P[c:83%](127,0,255), P[c:108%](0,0,255);
A[rainbow_wave, 6] <= P[c:-16%](0,0,255), P[c:10%](0,255,0), P[c:35%](255,255,0), P[c:60%](255,255,255), P[c:85%](127,0,255), P[c:110%](0,0,255);
A[rainbow_wave, 7] <= P[c:-14%](0,0,255), P[c:12%](0,255,0), P[c:37%](255,255,0), P[c:62%](255,255,255), P[c:87%](127,0,255), P[c:112%](0,0,255);
A[rainbow_wave, 8] <= P[c:-12%](0,0,255), P[c:14%](0,255,0), P[c:39%](255,255,0), P[c:64%](255,255,255), P[c:89%](127,0,255), P[c:114%](0,0,255);
A[rainbow_wave, 9] <= P[c:-10%](0,0,255), P[c:16%](0,255,0), P[c:41%](255,255,0), P[c:66%](255,255,255), P[c:91%](127,0,255), P[c:116%](0,0,255);
A[rainbow_wave, 10] <= P[c:-8%] (0,0,255), P[c:18%](0,255,0), P[c:43%](255,255,0), P[c:68%](255,255,255), P[c:93%](127,0,255), P[c:118%](0,0,255);
A[rainbow_wave, 11] <= P[c:-6%] (0,0,255), P[c:20%](0,255,0), P[c:45%](255,255,0), P[c:70%](255,255,255), P[c:95%](127,0,255), P[c:120%](0,0,255);
A[rainbow_wave, 12] <= P[c:-4%] (0,0,255), P[c:22%](0,255,0), P[c:47%](255,255,0), P[c:72%](255,255,255), P[c:97%](127,0,255), P[c:122%](0,0,255);
A[rainbow_wave, 13] <= P[c:-2%] (0,0,255), P[c:24%](0,255,0), P[c:49%](255,255,0), P[c:74%](255,255,255), P[c:99%](127,0,255), P[c:124%](0,0,255);
A[rainbow_wave, 14] <= P[c:0%] (0,0,255), P[c:25%](0,255,0), P[c:50%](255,255,0), P[c:75%](255,255,255), P[c:100%](127,0,255);
A[rainbow_wave, 15] <= P[c:-24%](127,0,255), P[c:2%] (0,0,255), P[c:27%](0,255,0), P[c:52%](255,255,0), P[c:77%](255,255,255), P[c:102%](127,0,255);
A[rainbow_wave, 16] <= P[c:-22%](127,0,255), P[c:4%] (0,0,255), P[c:29%](0,255,0), P[c:54%](255,255,0), P[c:79%](255,255,255), P[c:104%](127,0,255);
A[rainbow_wave, 17] <= P[c:-20%](127,0,255), P[c:6%] (0,0,255), P[c:31%](0,255,0), P[c:56%](255,255,0), P[c:81%](255,255,255), P[c:106%](127,0,255);
A[rainbow_wave, 18] <= P[c:-18%](127,0,255), P[c:8%] (0,0,255), P[c:33%](0,255,0), P[c:58%](255,255,0), P[c:83%](255,255,255), P[c:108%](127,0,255);
A[rainbow_wave, 19] <= P[c:-16%](127,0,255), P[c:10%](0,0,255), P[c:35%](0,255,0), P[c:60%](255,255,0), P[c:85%](255,255,255), P[c:110%](127,0,255);
A[rainbow_wave, 20] <= P[c:-14%](127,0,255), P[c:12%](0,0,255), P[c:37%](0,255,0), P[c:62%](255,255,0), P[c:87%](255,255,255), P[c:112%](127,0,255);
A[rainbow_wave, 21] <= P[c:-12%](127,0,255), P[c:14%](0,0,255), P[c:39%](0,255,0), P[c:64%](255,255,0), P[c:89%](255,255,255), P[c:114%](127,0,255);
A[rainbow_wave, 22] <= P[c:-10%](127,0,255), P[c:16%](0,0,255), P[c:41%](0,255,0), P[c:66%](255,255,0), P[c:91%](255,255,255), P[c:116%](127,0,255);
A[rainbow_wave, 23] <= P[c:-8%] (127,0,255), P[c:18%](0,0,255), P[c:43%](0,255,0), P[c:68%](255,255,0), P[c:93%](255,255,255), P[c:118%](127,0,255);
A[rainbow_wave, 24] <= P[c:-6%] (127,0,255), P[c:20%](0,0,255), P[c:45%](0,255,0), P[c:70%](255,255,0), P[c:95%](255,255,255), P[c:120%](127,0,255);
A[rainbow_wave, 25] <= P[c:-4%] (127,0,255), P[c:22%](0,0,255), P[c:47%](0,255,0), P[c:72%](255,255,0), P[c:97%](255,255,255), P[c:122%](127,0,255);
A[rainbow_wave, 26] <= P[c:-2%] (127,0,255), P[c:24%](0,0,255), P[c:49%](0,255,0), P[c:74%](255,255,0), P[c:99%](255,255,255), P[c:124%](127,0,255);
A[rainbow_wave, 28] <= P[c:0%] (127,0,255), P[c:25%](0,0,255), P[c:50%](0,255,0), P[c:75%](255,255,0), P[c:100%](255,255,255);
A[rainbow_wave, 29] <= P[c:-24%](255,255,255), P[c:2%] (127,0,255), P[c:27%](0,0,255), P[c:52%](0,255,0), P[c:77%](255,255,0), P[c:102%](255,255,255);
A[rainbow_wave, 30] <= P[c:-22%](255,255,255), P[c:4%] (127,0,255), P[c:29%](0,0,255), P[c:54%](0,255,0), P[c:79%](255,255,0), P[c:104%](255,255,255);
A[rainbow_wave, 31] <= P[c:-20%](255,255,255), P[c:6%] (127,0,255), P[c:31%](0,0,255), P[c:56%](0,255,0), P[c:81%](255,255,0), P[c:106%](255,255,255);
A[rainbow_wave, 32] <= P[c:-18%](255,255,255), P[c:8%] (127,0,255), P[c:33%](0,0,255), P[c:58%](0,255,0), P[c:83%](255,255,0), P[c:108%](255,255,255);
A[rainbow_wave, 33] <= P[c:-16%](255,255,255), P[c:10%](127,0,255), P[c:35%](0,0,255), P[c:60%](0,255,0), P[c:85%](255,255,0), P[c:110%](255,255,255);
A[rainbow_wave, 34] <= P[c:-14%](255,255,255), P[c:12%](127,0,255), P[c:37%](0,0,255), P[c:62%](0,255,0), P[c:87%](255,255,0), P[c:112%](255,255,255);
A[rainbow_wave, 35] <= P[c:-12%](255,255,255), P[c:14%](127,0,255), P[c:39%](0,0,255), P[c:64%](0,255,0), P[c:89%](255,255,0), P[c:114%](255,255,255);
A[rainbow_wave, 36] <= P[c:-10%](255,255,255), P[c:16%](127,0,255), P[c:41%](0,0,255), P[c:66%](0,255,0), P[c:91%](255,255,0), P[c:116%](255,255,255);
A[rainbow_wave, 37] <= P[c:-8%] (255,255,255), P[c:18%](127,0,255), P[c:43%](0,0,255), P[c:68%](0,255,0), P[c:93%](255,255,0), P[c:118%](255,255,255);
A[rainbow_wave, 38] <= P[c:-6%] (255,255,255), P[c:20%](127,0,255), P[c:45%](0,0,255), P[c:70%](0,255,0), P[c:95%](255,255,0), P[c:120%](255,255,255);
A[rainbow_wave, 39] <= P[c:-4%] (255,255,255), P[c:22%](127,0,255), P[c:47%](0,0,255), P[c:72%](0,255,0), P[c:97%](255,255,0), P[c:122%](255,255,255);
A[rainbow_wave, 40] <= P[c:-2%] (255,255,255), P[c:24%](127,0,255), P[c:49%](0,0,255), P[c:74%](0,255,0), P[c:99%](255,255,0), P[c:124%](255,255,255);
A[rainbow_wave, 41] <= P[c:0%] (255,255,255), P[c:25%](127,0,255), P[c:50%](0,0,255), P[c:75%](0,255,0), P[c:100%](255,255,0);
A[rainbow_wave, 42] <= P[c:-24%](255,255,0), P[c:2%] (255,255,255), P[c:27%](127,0,255), P[c:52%](0,0,255), P[c:77%](0,255,0), P[c:102%](255,255,0);
A[rainbow_wave, 43] <= P[c:-22%](255,255,0), P[c:4%] (255,255,255), P[c:29%](127,0,255), P[c:54%](0,0,255), P[c:79%](0,255,0), P[c:104%](255,255,0);
A[rainbow_wave, 44] <= P[c:-20%](255,255,0), P[c:6%] (255,255,255), P[c:31%](127,0,255), P[c:56%](0,0,255), P[c:81%](0,255,0), P[c:106%](255,255,0);
A[rainbow_wave, 45] <= P[c:-18%](255,255,0), P[c:8%] (255,255,255), P[c:33%](127,0,255), P[c:58%](0,0,255), P[c:83%](0,255,0), P[c:108%](255,255,0);
A[rainbow_wave, 46] <= P[c:-16%](255,255,0), P[c:10%](255,255,255), P[c:35%](127,0,255), P[c:60%](0,0,255), P[c:85%](0,255,0), P[c:110%](255,255,0);
A[rainbow_wave, 47] <= P[c:-14%](255,255,0), P[c:12%](255,255,255), P[c:37%](127,0,255), P[c:62%](0,0,255), P[c:87%](0,255,0), P[c:112%](255,255,0);
A[rainbow_wave, 48] <= P[c:-12%](255,255,0), P[c:14%](255,255,255), P[c:39%](127,0,255), P[c:64%](0,0,255), P[c:89%](0,255,0), P[c:114%](255,255,0);
A[rainbow_wave, 49] <= P[c:-10%](255,255,0), P[c:16%](255,255,255), P[c:41%](127,0,255), P[c:66%](0,0,255), P[c:91%](0,255,0), P[c:116%](255,255,0);
A[rainbow_wave, 50] <= P[c:-8%] (255,255,0), P[c:18%](255,255,255), P[c:43%](127,0,255), P[c:68%](0,0,255), P[c:93%](0,255,0), P[c:118%](255,255,0);
A[rainbow_wave, 51] <= P[c:-6%] (255,255,0), P[c:20%](255,255,255), P[c:45%](127,0,255), P[c:70%](0,0,255), P[c:95%](0,255,0), P[c:120%](255,255,0);
A[rainbow_wave, 52] <= P[c:-4%] (255,255,0), P[c:22%](255,255,255), P[c:47%](127,0,255), P[c:72%](0,0,255), P[c:97%](0,255,0), P[c:122%](255,255,0);
A[rainbow_wave, 53] <= P[c:-2%] (255,255,0), P[c:24%](255,255,255), P[c:49%](127,0,255), P[c:74%](0,0,255), P[c:99%](0,255,0), P[c:124%](255,255,0);
A[rainbow_wave, 54] <= P[c:0%] (255,255,0), P[c:25%](255,255,255), P[c:50%](127,0,255), P[c:75%](0,0,255), P[c:100%](0,255,0);
A[rainbow_wave, 55] <= P[c:-24%](0,255,0), P[c:2%] (255,255,0), P[c:27%](255,255,255), P[c:52%](127,0,255), P[c:77%](0,0,255), P[c:102%](0,255,0);
A[rainbow_wave, 56] <= P[c:-22%](0,255,0), P[c:4%] (255,255,0), P[c:29%](255,255,255), P[c:54%](127,0,255), P[c:79%](0,0,255), P[c:104%](0,255,0);
A[rainbow_wave, 57] <= P[c:-20%](0,255,0), P[c:6%] (255,255,0), P[c:31%](255,255,255), P[c:56%](127,0,255), P[c:81%](0,0,255), P[c:106%](0,255,0);
A[rainbow_wave, 58] <= P[c:-18%](0,255,0), P[c:8%] (255,255,0), P[c:33%](255,255,255), P[c:58%](127,0,255), P[c:83%](0,0,255), P[c:108%](0,255,0);
A[rainbow_wave, 59] <= P[c:-16%](0,255,0), P[c:10%](255,255,0), P[c:35%](255,255,255), P[c:60%](127,0,255), P[c:85%](0,0,255), P[c:110%](0,255,0);
A[rainbow_wave, 60] <= P[c:-14%](0,255,0), P[c:12%](255,255,0), P[c:37%](255,255,255), P[c:62%](127,0,255), P[c:87%](0,0,255), P[c:112%](0,255,0);
A[rainbow_wave, 61] <= P[c:-12%](0,255,0), P[c:14%](255,255,0), P[c:39%](255,255,255), P[c:64%](127,0,255), P[c:89%](0,0,255), P[c:114%](0,255,0);
A[rainbow_wave, 62] <= P[c:-10%](0,255,0), P[c:16%](255,255,0), P[c:41%](255,255,255), P[c:66%](127,0,255), P[c:91%](0,0,255), P[c:116%](0,255,0);
A[rainbow_wave, 63] <= P[c:-8%] (0,255,0), P[c:18%](255,255,0), P[c:43%](255,255,255), P[c:68%](127,0,255), P[c:93%](0,0,255), P[c:118%](0,255,0);
A[rainbow_wave, 64] <= P[c:-6%] (0,255,0), P[c:20%](255,255,0), P[c:45%](255,255,255), P[c:70%](127,0,255), P[c:95%](0,0,255), P[c:120%](0,255,0);
A[rainbow_wave, 65] <= P[c:-4%] (0,255,0), P[c:22%](255,255,0), P[c:47%](255,255,255), P[c:72%](127,0,255), P[c:97%](0,0,255), P[c:122%](0,255,0);
A[rainbow_wave, 66] <= P[c:-2%] (0,255,0), P[c:24%](255,255,0), P[c:49%](255,255,255), P[c:74%](127,0,255), P[c:99%](0,0,255), P[c:124%](0,255,0);
# File: kll/layouts/kira/old.demo.1.kll
# Animation Example Configuration
Name = "Demo 1";
Version = 1.0;
Author = "HaaTa (Jacob Alexander) 2017";
KLL = 0.5;
# Modified Date
Date = 2017-11-09;
### Animations ###
# Rainbow wave
A[rainbow_wave] <= start, framedelay:3, framestretch, loop, replace:all, pfunc:interp;
A[rainbow_wave, 1] <= P[c:0%] (0,255,0), P[c:25%](255,255,0), P[c:50%](255,255,255), P[c:75%](127,0,255), P[c:100%](0,0,255);
A[rainbow_wave, 2] <= P[c:-24%](0,0,255), P[c:2%] (0,255,0), P[c:27%](255,255,0), P[c:52%](255,255,255), P[c:77%](127,0,255), P[c:102%](0,0,255);
A[rainbow_wave, 3] <= P[c:-22%](0,0,255), P[c:4%] (0,255,0), P[c:29%](255,255,0), P[c:54%](255,255,255), P[c:79%](127,0,255), P[c:104%](0,0,255);
A[rainbow_wave, 4] <= P[c:-20%](0,0,255), P[c:6%] (0,255,0), P[c:31%](255,255,0), P[c:56%](255,255,255), P[c:81%](127,0,255), P[c:106%](0,0,255);
A[rainbow_wave, 5] <= P[c:-18%](0,0,255), P[c:8%] (0,255,0), P[c:33%](255,255,0), P[c:58%](255,255,255), P[c:83%](127,0,255), P[c:108%](0,0,255);
A[rainbow_wave, 6] <= P[c:-16%](0,0,255), P[c:10%](0,255,0), P[c:35%](255,255,0), P[c:60%](255,255,255), P[c:85%](127,0,255), P[c:110%](0,0,255);
A[rainbow_wave, 7] <= P[c:-14%](0,0,255), P[c:12%](0,255,0), P[c:37%](255,255,0), P[c:62%](255,255,255), P[c:87%](127,0,255), P[c:112%](0,0,255);
A[rainbow_wave, 8] <= P[c:-12%](0,0,255), P[c:14%](0,255,0), P[c:39%](255,255,0), P[c:64%](255,255,255), P[c:89%](127,0,255), P[c:114%](0,0,255);
A[rainbow_wave, 9] <= P[c:-10%](0,0,255), P[c:16%](0,255,0), P[c:41%](255,255,0), P[c:66%](255,255,255), P[c:91%](127,0,255), P[c:116%](0,0,255);
A[rainbow_wave, 10] <= P[c:-8%] (0,0,255), P[c:18%](0,255,0), P[c:43%](255,255,0), P[c:68%](255,255,255), P[c:93%](127,0,255), P[c:118%](0,0,255);
A[rainbow_wave, 11] <= P[c:-6%] (0,0,255), P[c:20%](0,255,0), P[c:45%](255,255,0), P[c:70%](255,255,255), P[c:95%](127,0,255), P[c:120%](0,0,255);
A[rainbow_wave, 12] <= P[c:-4%] (0,0,255), P[c:22%](0,255,0), P[c:47%](255,255,0), P[c:72%](255,255,255), P[c:97%](127,0,255), P[c:122%](0,0,255);
A[rainbow_wave, 13] <= P[c:-2%] (0,0,255), P[c:24%](0,255,0), P[c:49%](255,255,0), P[c:74%](255,255,255), P[c:99%](127,0,255), P[c:124%](0,0,255);
A[rainbow_wave, 14] <= P[c:0%] (0,0,255), P[c:25%](0,255,0), P[c:50%](255,255,0), P[c:75%](255,255,255), P[c:100%](127,0,255);
A[rainbow_wave, 15] <= P[c:-24%](127,0,255), P[c:2%] (0,0,255), P[c:27%](0,255,0), P[c:52%](255,255,0), P[c:77%](255,255,255), P[c:102%](127,0,255);
A[rainbow_wave, 16] <= P[c:-22%](127,0,255), P[c:4%] (0,0,255), P[c:29%](0,255,0), P[c:54%](255,255,0), P[c:79%](255,255,255), P[c:104%](127,0,255);
A[rainbow_wave, 17] <= P[c:-20%](127,0,255), P[c:6%] (0,0,255), P[c:31%](0,255,0), P[c:56%](255,255,0), P[c:81%](255,255,255), P[c:106%](127,0,255);
A[rainbow_wave, 18] <= P[c:-18%](127,0,255), P[c:8%] (0,0,255), P[c:33%](0,255,0), P[c:58%](255,255,0), P[c:83%](255,255,255), P[c:108%](127,0,255);
A[rainbow_wave, 19] <= P[c:-16%](127,0,255), P[c:10%](0,0,255), P[c:35%](0,255,0), P[c:60%](255,255,0), P[c:85%](255,255,255), P[c:110%](127,0,255);
A[rainbow_wave, 20] <= P[c:-14%](127,0,255), P[c:12%](0,0,255), P[c:37%](0,255,0), P[c:62%](255,255,0), P[c:87%](255,255,255), P[c:112%](127,0,255);
A[rainbow_wave, 21] <= P[c:-12%](127,0,255), P[c:14%](0,0,255), P[c:39%](0,255,0), P[c:64%](255,255,0), P[c:89%](255,255,255), P[c:114%](127,0,255);
A[rainbow_wave, 22] <= P[c:-10%](127,0,255), P[c:16%](0,0,255), P[c:41%](0,255,0), P[c:66%](255,255,0), P[c:91%](255,255,255), P[c:116%](127,0,255);
A[rainbow_wave, 23] <= P[c:-8%] (127,0,255), P[c:18%](0,0,255), P[c:43%](0,255,0), P[c:68%](255,255,0), P[c:93%](255,255,255), P[c:118%](127,0,255);
A[rainbow_wave, 24] <= P[c:-6%] (127,0,255), P[c:20%](0,0,255), P[c:45%](0,255,0), P[c:70%](255,255,0), P[c:95%](255,255,255), P[c:120%](127,0,255);
A[rainbow_wave, 25] <= P[c:-4%] (127,0,255), P[c:22%](0,0,255), P[c:47%](0,255,0), P[c:72%](255,255,0), P[c:97%](255,255,255), P[c:122%](127,0,255);
A[rainbow_wave, 26] <= P[c:-2%] (127,0,255), P[c:24%](0,0,255), P[c:49%](0,255,0), P[c:74%](255,255,0), P[c:99%](255,255,255), P[c:124%](127,0,255);
A[rainbow_wave, 28] <= P[c:0%] (127,0,255), P[c:25%](0,0,255), P[c:50%](0,255,0), P[c:75%](255,255,0), P[c:100%](255,255,255);
A[rainbow_wave, 29] <= P[c:-24%](255,255,255), P[c:2%] (127,0,255), P[c:27%](0,0,255), P[c:52%](0,255,0), P[c:77%](255,255,0), P[c:102%](255,255,255);
A[rainbow_wave, 30] <= P[c:-22%](255,255,255), P[c:4%] (127,0,255), P[c:29%](0,0,255), P[c:54%](0,255,0), P[c:79%](255,255,0), P[c:104%](255,255,255);
A[rainbow_wave, 31] <= P[c:-20%](255,255,255), P[c:6%] (127,0,255), P[c:31%](0,0,255), P[c:56%](0,255,0), P[c:81%](255,255,0), P[c:106%](255,255,255);
A[rainbow_wave, 32] <= P[c:-18%](255,255,255), P[c:8%] (127,0,255), P[c:33%](0,0,255), P[c:58%](0,255,0), P[c:83%](255,255,0), P[c:108%](255,255,255);
A[rainbow_wave, 33] <= P[c:-16%](255,255,255), P[c:10%](127,0,255), P[c:35%](0,0,255), P[c:60%](0,255,0), P[c:85%](255,255,0), P[c:110%](255,255,255);
A[rainbow_wave, 34] <= P[c:-14%](255,255,255), P[c:12%](127,0,255), P[c:37%](0,0,255), P[c:62%](0,255,0), P[c:87%](255,255,0), P[c:112%](255,255,255);
A[rainbow_wave, 35] <= P[c:-12%](255,255,255), P[c:14%](127,0,255), P[c:39%](0,0,255), P[c:64%](0,255,0), P[c:89%](255,255,0), P[c:114%](255,255,255);
A[rainbow_wave, 36] <= P[c:-10%](255,255,255), P[c:16%](127,0,255), P[c:41%](0,0,255), P[c:66%](0,255,0), P[c:91%](255,255,0), P[c:116%](255,255,255);
A[rainbow_wave, 37] <= P[c:-8%] (255,255,255), P[c:18%](127,0,255), P[c:43%](0,0,255), P[c:68%](0,255,0), P[c:93%](255,255,0), P[c:118%](255,255,255);
A[rainbow_wave, 38] <= P[c:-6%] (255,255,255), P[c:20%](127,0,255), P[c:45%](0,0,255), P[c:70%](0,255,0), P[c:95%](255,255,0), P[c:120%](255,255,255);
A[rainbow_wave, 39] <= P[c:-4%] (255,255,255), P[c:22%](127,0,255), P[c:47%](0,0,255), P[c:72%](0,255,0), P[c:97%](255,255,0), P[c:122%](255,255,255);
A[rainbow_wave, 40] <= P[c:-2%] (255,255,255), P[c:24%](127,0,255), P[c:49%](0,0,255), P[c:74%](0,255,0), P[c:99%](255,255,0), P[c:124%](255,255,255);
A[rainbow_wave, 41] <= P[c:0%] (255,255,255), P[c:25%](127,0,255), P[c:50%](0,0,255), P[c:75%](0,255,0), P[c:100%](255,255,0);
A[rainbow_wave, 42] <= P[c:-24%](255,255,0), P[c:2%] (255,255,255), P[c:27%](127,0,255), P[c:52%](0,0,255), P[c:77%](0,255,0), P[c:102%](255,255,0);
A[rainbow_wave, 43] <= P[c:-22%](255,255,0), P[c:4%] (255,255,255), P[c:29%](127,0,255), P[c:54%](0,0,255), P[c:79%](0,255,0), P[c:104%](255,255,0);
A[rainbow_wave, 44] <= P[c:-20%](255,255,0), P[c:6%] (255,255,255), P[c:31%](127,0,255), P[c:56%](0,0,255), P[c:81%](0,255,0), P[c:106%](255,255,0);
A[rainbow_wave, 45] <= P[c:-18%](255,255,0), P[c:8%] (255,255,255), P[c:33%](127,0,255), P[c:58%](0,0,255), P[c:83%](0,255,0), P[c:108%](255,255,0);
A[rainbow_wave, 46] <= P[c:-16%](255,255,0), P[c:10%](255,255,255), P[c:35%](127,0,255), P[c:60%](0,0,255), P[c:85%](0,255,0), P[c:110%](255,255,0);
A[rainbow_wave, 47] <= P[c:-14%](255,255,0), P[c:12%](255,255,255), P[c:37%](127,0,255), P[c:62%](0,0,255), P[c:87%](0,255,0), P[c:112%](255,255,0);
A[rainbow_wave, 48] <= P[c:-12%](255,255,0), P[c:14%](255,255,255), P[c:39%](127,0,255), P[c:64%](0,0,255), P[c:89%](0,255,0), P[c:114%](255,255,0);
A[rainbow_wave, 49] <= P[c:-10%](255,255,0), P[c:16%](255,255,255), P[c:41%](127,0,255), P[c:66%](0,0,255), P[c:91%](0,255,0), P[c:116%](255,255,0);
A[rainbow_wave, 50] <= P[c:-8%] (255,255,0), P[c:18%](255,255,255), P[c:43%](127,0,255), P[c:68%](0,0,255), P[c:93%](0,255,0), P[c:118%](255,255,0);
A[rainbow_wave, 51] <= P[c:-6%] (255,255,0), P[c:20%](255,255,255), P[c:45%](127,0,255), P[c:70%](0,0,255), P[c:95%](0,255,0), P[c:120%](255,255,0);
A[rainbow_wave, 52] <= P[c:-4%] (255,255,0), P[c:22%](255,255,255), P[c:47%](127,0,255), P[c:72%](0,0,255), P[c:97%](0,255,0), P[c:122%](255,255,0);
A[rainbow_wave, 53] <= P[c:-2%] (255,255,0), P[c:24%](255,255,255), P[c:49%](127,0,255), P[c:74%](0,0,255), P[c:99%](0,255,0), P[c:124%](255,255,0);
A[rainbow_wave, 54] <= P[c:0%] (255,255,0), P[c:25%](255,255,255), P[c:50%](127,0,255), P[c:75%](0,0,255), P[c:100%](0,255,0);
A[rainbow_wave, 55] <= P[c:-24%](0,255,0), P[c:2%] (255,255,0), P[c:27%](255,255,255), P[c:52%](127,0,255), P[c:77%](0,0,255), P[c:102%](0,255,0);
A[rainbow_wave, 56] <= P[c:-22%](0,255,0), P[c:4%] (255,255,0), P[c:29%](255,255,255), P[c:54%](127,0,255), P[c:79%](0,0,255), P[c:104%](0,255,0);
A[rainbow_wave, 57] <= P[c:-20%](0,255,0), P[c:6%] (255,255,0), P[c:31%](255,255,255), P[c:56%](127,0,255), P[c:81%](0,0,255), P[c:106%](0,255,0);
A[rainbow_wave, 58] <= P[c:-18%](0,255,0), P[c:8%] (255,255,0), P[c:33%](255,255,255), P[c:58%](127,0,255), P[c:83%](0,0,255), P[c:108%](0,255,0);
A[rainbow_wave, 59] <= P[c:-16%](0,255,0), P[c:10%](255,255,0), P[c:35%](255,255,255), P[c:60%](127,0,255), P[c:85%](0,0,255), P[c:110%](0,255,0);
A[rainbow_wave, 60] <= P[c:-14%](0,255,0), P[c:12%](255,255,0), P[c:37%](255,255,255), P[c:62%](127,0,255), P[c:87%](0,0,255), P[c:112%](0,255,0);
A[rainbow_wave, 61] <= P[c:-12%](0,255,0), P[c:14%](255,255,0), P[c:39%](255,255,255), P[c:64%](127,0,255), P[c:89%](0,0,255), P[c:114%](0,255,0);
A[rainbow_wave, 62] <= P[c:-10%](0,255,0), P[c:16%](255,255,0), P[c:41%](255,255,255), P[c:66%](127,0,255), P[c:91%](0,0,255), P[c:116%](0,255,0);
A[rainbow_wave, 63] <= P[c:-8%] (0,255,0), P[c:18%](255,255,0), P[c:43%](255,255,255), P[c:68%](127,0,255), P[c:93%](0,0,255), P[c:118%](0,255,0);
A[rainbow_wave, 64] <= P[c:-6%] (0,255,0), P[c:20%](255,255,0), P[c:45%](255,255,255), P[c:70%](127,0,255), P[c:95%](0,0,255), P[c:120%](0,255,0);
A[rainbow_wave, 65] <= P[c:-4%] (0,255,0), P[c:22%](255,255,0), P[c:47%](255,255,255), P[c:72%](127,0,255), P[c:97%](0,0,255), P[c:122%](0,255,0);
A[rainbow_wave, 66] <= P[c:-2%] (0,255,0), P[c:24%](255,255,0), P[c:49%](255,255,255), P[c:74%](127,0,255), P[c:99%](0,0,255), P[c:124%](0,255,0);
# File: kll/layouts/kira/release.1.kll
# Kira Release Animations and Default Map
Name = Kira;
Version = 0.2;
Author = "HaaTa (Jacob Alexander) 2018";
KLL = 0.5;
# Modified Date
Date = 2018-10-13;
### Layer Combinations ###
U"RCtrl" + U"RShift" : Layer[1];
U"RCtrl" + U"RAlt" : Layer[2];
### Layer Fade Updates ###
Layer[1] : fade_layer_highlight(1);
Layer[2] : fade_layer_highlight(2);
### Default Animation ###
A[default_animation] <= start, loops:1, pfunc:interp, replace:clear;
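# Note: because pfunc:interp interpolates between the listed pixels, giving only
# the first (P[1]) and last (P[128]) pixel the same colour in each frame below
# floods the whole board with that colour, so the power-on sequence is a single
# full-board fade from black up to white and then down toward a warm orange,
# with the indicator LEDs (P[126-128]) cleared on the final frame.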
A[default_animation, 1] <= P[1](0,0,0), P[128](0,0,0);
A[default_animation, 2] <= P[1](10,10,10), P[128](10,10,10);
A[default_animation, 3] <= P[1](20,20,20), P[128](20,20,20);
A[default_animation, 4] <= P[1](30,30,30), P[128](30,30,30);
A[default_animation, 5] <= P[1](40,40,40), P[128](40,40,40);
A[default_animation, 6] <= P[1](50,50,50), P[128](50,50,50);
A[default_animation, 7] <= P[1](60,60,60), P[128](60,60,60);
A[default_animation, 8] <= P[1](70,70,70), P[128](70,70,70);
A[default_animation, 9] <= P[1](80,80,80), P[128](80,80,80);
A[default_animation, 10] <= P[1](90,90,90), P[128](90,90,90);
A[default_animation, 11] <= P[1](100,100,100), P[128](100,100,100);
A[default_animation, 12] <= P[1](110,110,110), P[128](110,110,110);
A[default_animation, 13] <= P[1](120,120,120), P[128](120,120,120);
A[default_animation, 14] <= P[1](130,130,130), P[128](130,130,130);
A[default_animation, 15] <= P[1](140,140,140), P[128](140,140,140);
A[default_animation, 16] <= P[1](150,150,150), P[128](150,150,150);
A[default_animation, 17] <= P[1](160,160,160), P[128](160,160,160);
A[default_animation, 18] <= P[1](170,170,170), P[128](170,170,170);
A[default_animation, 19] <= P[1](180,180,180), P[128](180,180,180);
A[default_animation, 20] <= P[1](190,190,190), P[128](190,190,190);
A[default_animation, 21] <= P[1](200,200,200), P[128](200,200,200);
A[default_animation, 22] <= P[1](210,210,210), P[128](210,210,210);
A[default_animation, 23] <= P[1](220,220,220), P[128](220,220,220);
A[default_animation, 24] <= P[1](230,230,230), P[128](230,230,230);
A[default_animation, 25] <= P[1](240,240,240), P[128](240,240,240);
A[default_animation, 26] <= P[1](250,250,250), P[128](250,250,250);
A[default_animation, 27] <= P[1](255,255,255), P[128](255,255,255);
A[default_animation, 28] <= P[1](255,250,250), P[128](255,250,250);
A[default_animation, 29] <= P[1](255,240,240), P[128](255,240,240);
A[default_animation, 30] <= P[1](255,230,230), P[128](255,230,230);
A[default_animation, 31] <= P[1](255,220,220), P[128](255,220,220);
A[default_animation, 32] <= P[1](255,210,210), P[128](255,210,210);
A[default_animation, 33] <= P[1](255,200,200), P[128](255,200,200);
A[default_animation, 34] <= P[1](255,190,190), P[128](255,190,190);
A[default_animation, 35] <= P[1](255,180,180), P[128](255,180,180);
A[default_animation, 36] <= P[1](255,170,170), P[128](255,170,170);
A[default_animation, 37] <= P[1](255,160,160), P[128](255,160,160);
A[default_animation, 38] <= P[1](255,150,150), P[128](255,150,150);
A[default_animation, 39] <= P[1](255,140,140), P[128](255,140,140);
A[default_animation, 40] <= P[1](255,130,130), P[128](255,130,130);
A[default_animation, 41] <= P[1](255,120,120), P[128](255,120,120);
A[default_animation, 42] <= P[1](255,110,110), P[128](255,110,110);
A[default_animation, 43] <= P[1](255,100,100), P[128](255,100,100);
A[default_animation, 44] <= P[1](255,90,90), P[128](255,90,90);
A[default_animation, 45] <= P[1](255,88,80), P[128](255,88,80);
A[default_animation, 46] <= P[1](255,88,70), P[128](255,88,70);
A[default_animation, 47] <= P[1](255,88,60), P[128](255,88,60);
A[default_animation, 48] <= P[1](255,88,50), P[128](255,88,50);
A[default_animation, 49] <= P[1](255,88,40), P[128](255,88,40);
A[default_animation, 50] <= P[1](255,88,30), P[128](255,88,30);
A[default_animation, 51] <= P[1](255,88,20), P[128](255,88,20);
A[default_animation, 52] <= P[1](255,88,10), P[128](255,88,10);
A[default_animation, 53] <= P[1](255,88,0), P[128](255,88,0);
A[default_animation, 53] <= P[126-128](0,0,0); # Indicators
# Test Color (All-White)
A[all_white] <= loops:1, replace:clear, pfunc:interp;
A[all_white, 1] <= P[1](255,255,255), P[128](255,255,255);
### Wave Animations ###
# Miami wave
A[miami_wave] <= framedelay:3, framestretch, loop, replace:clear, pfunc:interp;
A[miami_wave, 1] <= P[c:0%] (0,255,0), P[c:25%](255,255,0), P[c:50%](255,255,255), P[c:75%](127,0,255), P[c:100%](0,0,255);
A[miami_wave, 2] <= P[c:-24%](0,0,255), P[c:2%] (0,255,0), P[c:27%](255,255,0), P[c:52%](255,255,255), P[c:77%](127,0,255), P[c:102%](0,0,255);
A[miami_wave, 3] <= P[c:-22%](0,0,255), P[c:4%] (0,255,0), P[c:29%](255,255,0), P[c:54%](255,255,255), P[c:79%](127,0,255), P[c:104%](0,0,255);
A[miami_wave, 4] <= P[c:-20%](0,0,255), P[c:6%] (0,255,0), P[c:31%](255,255,0), P[c:56%](255,255,255), P[c:81%](127,0,255), P[c:106%](0,0,255);
A[miami_wave, 5] <= P[c:-18%](0,0,255), P[c:8%] (0,255,0), P[c:33%](255,255,0), P[c:58%](255,255,255), P[c:83%](127,0,255), P[c:108%](0,0,255);
A[miami_wave, 6] <= P[c:-16%](0,0,255), P[c:10%](0,255,0), P[c:35%](255,255,0), P[c:60%](255,255,255), P[c:85%](127,0,255), P[c:110%](0,0,255);
A[miami_wave, 7] <= P[c:-14%](0,0,255), P[c:12%](0,255,0), P[c:37%](255,255,0), P[c:62%](255,255,255), P[c:87%](127,0,255), P[c:112%](0,0,255);
A[miami_wave, 8] <= P[c:-12%](0,0,255), P[c:14%](0,255,0), P[c:39%](255,255,0), P[c:64%](255,255,255), P[c:89%](127,0,255), P[c:114%](0,0,255);
A[miami_wave, 9] <= P[c:-10%](0,0,255), P[c:16%](0,255,0), P[c:41%](255,255,0), P[c:66%](255,255,255), P[c:91%](127,0,255), P[c:116%](0,0,255);
A[miami_wave, 10] <= P[c:-8%] (0,0,255), P[c:18%](0,255,0), P[c:43%](255,255,0), P[c:68%](255,255,255), P[c:93%](127,0,255), P[c:118%](0,0,255);
A[miami_wave, 11] <= P[c:-6%] (0,0,255), P[c:20%](0,255,0), P[c:45%](255,255,0), P[c:70%](255,255,255), P[c:95%](127,0,255), P[c:120%](0,0,255);
A[miami_wave, 12] <= P[c:-4%] (0,0,255), P[c:22%](0,255,0), P[c:47%](255,255,0), P[c:72%](255,255,255), P[c:97%](127,0,255), P[c:122%](0,0,255);
A[miami_wave, 13] <= P[c:-2%] (0,0,255), P[c:24%](0,255,0), P[c:49%](255,255,0), P[c:74%](255,255,255), P[c:99%](127,0,255), P[c:124%](0,0,255);
A[miami_wave, 14] <= P[c:0%] (0,0,255), P[c:25%](0,255,0), P[c:50%](255,255,0), P[c:75%](255,255,255), P[c:100%](127,0,255);
A[miami_wave, 15] <= P[c:-24%](127,0,255), P[c:2%] (0,0,255), P[c:27%](0,255,0), P[c:52%](255,255,0), P[c:77%](255,255,255), P[c:102%](127,0,255);
A[miami_wave, 16] <= P[c:-22%](127,0,255), P[c:4%] (0,0,255), P[c:29%](0,255,0), P[c:54%](255,255,0), P[c:79%](255,255,255), P[c:104%](127,0,255);
A[miami_wave, 17] <= P[c:-20%](127,0,255), P[c:6%] (0,0,255), P[c:31%](0,255,0), P[c:56%](255,255,0), P[c:81%](255,255,255), P[c:106%](127,0,255);
A[miami_wave, 18] <= P[c:-18%](127,0,255), P[c:8%] (0,0,255), P[c:33%](0,255,0), P[c:58%](255,255,0), P[c:83%](255,255,255), P[c:108%](127,0,255);
A[miami_wave, 19] <= P[c:-16%](127,0,255), P[c:10%](0,0,255), P[c:35%](0,255,0), P[c:60%](255,255,0), P[c:85%](255,255,255), P[c:110%](127,0,255);
A[miami_wave, 20] <= P[c:-14%](127,0,255), P[c:12%](0,0,255), P[c:37%](0,255,0), P[c:62%](255,255,0), P[c:87%](255,255,255), P[c:112%](127,0,255);
A[miami_wave, 21] <= P[c:-12%](127,0,255), P[c:14%](0,0,255), P[c:39%](0,255,0), P[c:64%](255,255,0), P[c:89%](255,255,255), P[c:114%](127,0,255);
A[miami_wave, 22] <= P[c:-10%](127,0,255), P[c:16%](0,0,255), P[c:41%](0,255,0), P[c:66%](255,255,0), P[c:91%](255,255,255), P[c:116%](127,0,255);
A[miami_wave, 23] <= P[c:-8%] (127,0,255), P[c:18%](0,0,255), P[c:43%](0,255,0), P[c:68%](255,255,0), P[c:93%](255,255,255), P[c:118%](127,0,255);
A[miami_wave, 24] <= P[c:-6%] (127,0,255), P[c:20%](0,0,255), P[c:45%](0,255,0), P[c:70%](255,255,0), P[c:95%](255,255,255), P[c:120%](127,0,255);
A[miami_wave, 25] <= P[c:-4%] (127,0,255), P[c:22%](0,0,255), P[c:47%](0,255,0), P[c:72%](255,255,0), P[c:97%](255,255,255), P[c:122%](127,0,255);
A[miami_wave, 26] <= P[c:-2%] (127,0,255), P[c:24%](0,0,255), P[c:49%](0,255,0), P[c:74%](255,255,0), P[c:99%](255,255,255), P[c:124%](127,0,255);
A[miami_wave, 28] <= P[c:0%] (127,0,255), P[c:25%](0,0,255), P[c:50%](0,255,0), P[c:75%](255,255,0), P[c:100%](255,255,255);
A[miami_wave, 29] <= P[c:-24%](255,255,255), P[c:2%] (127,0,255), P[c:27%](0,0,255), P[c:52%](0,255,0), P[c:77%](255,255,0), P[c:102%](255,255,255);
A[miami_wave, 30] <= P[c:-22%](255,255,255), P[c:4%] (127,0,255), P[c:29%](0,0,255), P[c:54%](0,255,0), P[c:79%](255,255,0), P[c:104%](255,255,255);
A[miami_wave, 31] <= P[c:-20%](255,255,255), P[c:6%] (127,0,255), P[c:31%](0,0,255), P[c:56%](0,255,0), P[c:81%](255,255,0), P[c:106%](255,255,255);
A[miami_wave, 32] <= P[c:-18%](255,255,255), P[c:8%] (127,0,255), P[c:33%](0,0,255), P[c:58%](0,255,0), P[c:83%](255,255,0), P[c:108%](255,255,255);
A[miami_wave, 33] <= P[c:-16%](255,255,255), P[c:10%](127,0,255), P[c:35%](0,0,255), P[c:60%](0,255,0), P[c:85%](255,255,0), P[c:110%](255,255,255);
A[miami_wave, 34] <= P[c:-14%](255,255,255), P[c:12%](127,0,255), P[c:37%](0,0,255), P[c:62%](0,255,0), P[c:87%](255,255,0), P[c:112%](255,255,255);
A[miami_wave, 35] <= P[c:-12%](255,255,255), P[c:14%](127,0,255), P[c:39%](0,0,255), P[c:64%](0,255,0), P[c:89%](255,255,0), P[c:114%](255,255,255);
A[miami_wave, 36] <= P[c:-10%](255,255,255), P[c:16%](127,0,255), P[c:41%](0,0,255), P[c:66%](0,255,0), P[c:91%](255,255,0), P[c:116%](255,255,255);
A[miami_wave, 37] <= P[c:-8%] (255,255,255), P[c:18%](127,0,255), P[c:43%](0,0,255), P[c:68%](0,255,0), P[c:93%](255,255,0), P[c:118%](255,255,255);
A[miami_wave, 38] <= P[c:-6%] (255,255,255), P[c:20%](127,0,255), P[c:45%](0,0,255), P[c:70%](0,255,0), P[c:95%](255,255,0), P[c:120%](255,255,255);
A[miami_wave, 39] <= P[c:-4%] (255,255,255), P[c:22%](127,0,255), P[c:47%](0,0,255), P[c:72%](0,255,0), P[c:97%](255,255,0), P[c:122%](255,255,255);
A[miami_wave, 40] <= P[c:-2%] (255,255,255), P[c:24%](127,0,255), P[c:49%](0,0,255), P[c:74%](0,255,0), P[c:99%](255,255,0), P[c:124%](255,255,255);
A[miami_wave, 41] <= P[c:0%] (255,255,255), P[c:25%](127,0,255), P[c:50%](0,0,255), P[c:75%](0,255,0), P[c:100%](255,255,0);
A[miami_wave, 42] <= P[c:-24%](255,255,0), P[c:2%] (255,255,255), P[c:27%](127,0,255), P[c:52%](0,0,255), P[c:77%](0,255,0), P[c:102%](255,255,0);
A[miami_wave, 43] <= P[c:-22%](255,255,0), P[c:4%] (255,255,255), P[c:29%](127,0,255), P[c:54%](0,0,255), P[c:79%](0,255,0), P[c:104%](255,255,0);
A[miami_wave, 44] <= P[c:-20%](255,255,0), P[c:6%] (255,255,255), P[c:31%](127,0,255), P[c:56%](0,0,255), P[c:81%](0,255,0), P[c:106%](255,255,0);
A[miami_wave, 45] <= P[c:-18%](255,255,0), P[c:8%] (255,255,255), P[c:33%](127,0,255), P[c:58%](0,0,255), P[c:83%](0,255,0), P[c:108%](255,255,0);
A[miami_wave, 46] <= P[c:-16%](255,255,0), P[c:10%](255,255,255), P[c:35%](127,0,255), P[c:60%](0,0,255), P[c:85%](0,255,0), P[c:110%](255,255,0);
A[miami_wave, 47] <= P[c:-14%](255,255,0), P[c:12%](255,255,255), P[c:37%](127,0,255), P[c:62%](0,0,255), P[c:87%](0,255,0), P[c:112%](255,255,0);
A[miami_wave, 48] <= P[c:-12%](255,255,0), P[c:14%](255,255,255), P[c:39%](127,0,255), P[c:64%](0,0,255), P[c:89%](0,255,0), P[c:114%](255,255,0);
A[miami_wave, 49] <= P[c:-10%](255,255,0), P[c:16%](255,255,255), P[c:41%](127,0,255), P[c:66%](0,0,255), P[c:91%](0,255,0), P[c:116%](255,255,0);
A[miami_wave, 50] <= P[c:-8%] (255,255,0), P[c:18%](255,255,255), P[c:43%](127,0,255), P[c:68%](0,0,255), P[c:93%](0,255,0), P[c:118%](255,255,0);
A[miami_wave, 51] <= P[c:-6%] (255,255,0), P[c:20%](255,255,255), P[c:45%](127,0,255), P[c:70%](0,0,255), P[c:95%](0,255,0), P[c:120%](255,255,0);
A[miami_wave, 52] <= P[c:-4%] (255,255,0), P[c:22%](255,255,255), P[c:47%](127,0,255), P[c:72%](0,0,255), P[c:97%](0,255,0), P[c:122%](255,255,0);
A[miami_wave, 53] <= P[c:-2%] (255,255,0), P[c:24%](255,255,255), P[c:49%](127,0,255), P[c:74%](0,0,255), P[c:99%](0,255,0), P[c:124%](255,255,0);
A[miami_wave, 54] <= P[c:0%] (255,255,0), P[c:25%](255,255,255), P[c:50%](127,0,255), P[c:75%](0,0,255), P[c:100%](0,255,0);
A[miami_wave, 55] <= P[c:-24%](0,255,0), P[c:2%] (255,255,0), P[c:27%](255,255,255), P[c:52%](127,0,255), P[c:77%](0,0,255), P[c:102%](0,255,0);
A[miami_wave, 56] <= P[c:-22%](0,255,0), P[c:4%] (255,255,0), P[c:29%](255,255,255), P[c:54%](127,0,255), P[c:79%](0,0,255), P[c:104%](0,255,0);
A[miami_wave, 57] <= P[c:-20%](0,255,0), P[c:6%] (255,255,0), P[c:31%](255,255,255), P[c:56%](127,0,255), P[c:81%](0,0,255), P[c:106%](0,255,0);
A[miami_wave, 58] <= P[c:-18%](0,255,0), P[c:8%] (255,255,0), P[c:33%](255,255,255), P[c:58%](127,0,255), P[c:83%](0,0,255), P[c:108%](0,255,0);
A[miami_wave, 59] <= P[c:-16%](0,255,0), P[c:10%](255,255,0), P[c:35%](255,255,255), P[c:60%](127,0,255), P[c:85%](0,0,255), P[c:110%](0,255,0);
A[miami_wave, 60] <= P[c:-14%](0,255,0), P[c:12%](255,255,0), P[c:37%](255,255,255), P[c:62%](127,0,255), P[c:87%](0,0,255), P[c:112%](0,255,0);
A[miami_wave, 61] <= P[c:-12%](0,255,0), P[c:14%](255,255,0), P[c:39%](255,255,255), P[c:64%](127,0,255), P[c:89%](0,0,255), P[c:114%](0,255,0);
A[miami_wave, 62] <= P[c:-10%](0,255,0), P[c:16%](255,255,0), P[c:41%](255,255,255), P[c:66%](127,0,255), P[c:91%](0,0,255), P[c:116%](0,255,0);
A[miami_wave, 63] <= P[c:-8%] (0,255,0), P[c:18%](255,255,0), P[c:43%](255,255,255), P[c:68%](127,0,255), P[c:93%](0,0,255), P[c:118%](0,255,0);
A[miami_wave, 64] <= P[c:-6%] (0,255,0), P[c:20%](255,255,0), P[c:45%](255,255,255), P[c:70%](127,0,255), P[c:95%](0,0,255), P[c:120%](0,255,0);
A[miami_wave, 65] <= P[c:-4%] (0,255,0), P[c:22%](255,255,0), P[c:47%](255,255,255), P[c:72%](127,0,255), P[c:97%](0,0,255), P[c:122%](0,255,0);
A[miami_wave, 66] <= P[c:-2%] (0,255,0), P[c:24%](255,255,0), P[c:49%](255,255,255), P[c:74%](127,0,255), P[c:99%](0,0,255), P[c:124%](0,255,0);
# Rainbow wave
A[rainbow_wave] <= framedelay:3, framestretch, loop, replace:clear, pfunc:interp;
A[rainbow_wave, 1] <= P[c:0%] (0,255,0), P[c:25%](255,255,0), P[c:50%](255,0,0), P[c:75%](127,0,255), P[c:100%](0,0,255);
A[rainbow_wave, 2] <= P[c:-24%](0,0,255), P[c:2%] (0,255,0), P[c:27%](255,255,0), P[c:52%](255,0,0), P[c:77%](127,0,255), P[c:102%](0,0,255);
A[rainbow_wave, 3] <= P[c:-22%](0,0,255), P[c:4%] (0,255,0), P[c:29%](255,255,0), P[c:54%](255,0,0), P[c:79%](127,0,255), P[c:104%](0,0,255);
A[rainbow_wave, 4] <= P[c:-20%](0,0,255), P[c:6%] (0,255,0), P[c:31%](255,255,0), P[c:56%](255,0,0), P[c:81%](127,0,255), P[c:106%](0,0,255);
A[rainbow_wave, 5] <= P[c:-18%](0,0,255), P[c:8%] (0,255,0), P[c:33%](255,255,0), P[c:58%](255,0,0), P[c:83%](127,0,255), P[c:108%](0,0,255);
A[rainbow_wave, 6] <= P[c:-16%](0,0,255), P[c:10%](0,255,0), P[c:35%](255,255,0), P[c:60%](255,0,0), P[c:85%](127,0,255), P[c:110%](0,0,255);
A[rainbow_wave, 7] <= P[c:-14%](0,0,255), P[c:12%](0,255,0), P[c:37%](255,255,0), P[c:62%](255,0,0), P[c:87%](127,0,255), P[c:112%](0,0,255);
A[rainbow_wave, 8] <= P[c:-12%](0,0,255), P[c:14%](0,255,0), P[c:39%](255,255,0), P[c:64%](255,0,0), P[c:89%](127,0,255), P[c:114%](0,0,255);
A[rainbow_wave, 9] <= P[c:-10%](0,0,255), P[c:16%](0,255,0), P[c:41%](255,255,0), P[c:66%](255,0,0), P[c:91%](127,0,255), P[c:116%](0,0,255);
A[rainbow_wave, 10] <= P[c:-8%] (0,0,255), P[c:18%](0,255,0), P[c:43%](255,255,0), P[c:68%](255,0,0), P[c:93%](127,0,255), P[c:118%](0,0,255);
A[rainbow_wave, 11] <= P[c:-6%] (0,0,255), P[c:20%](0,255,0), P[c:45%](255,255,0), P[c:70%](255,0,0), P[c:95%](127,0,255), P[c:120%](0,0,255);
A[rainbow_wave, 12] <= P[c:-4%] (0,0,255), P[c:22%](0,255,0), P[c:47%](255,255,0), P[c:72%](255,0,0), P[c:97%](127,0,255), P[c:122%](0,0,255);
A[rainbow_wave, 13] <= P[c:-2%] (0,0,255), P[c:24%](0,255,0), P[c:49%](255,255,0), P[c:74%](255,0,0), P[c:99%](127,0,255), P[c:124%](0,0,255);
A[rainbow_wave, 14] <= P[c:0%] (0,0,255), P[c:25%](0,255,0), P[c:50%](255,255,0), P[c:75%](255,0,0), P[c:100%](127,0,255);
A[rainbow_wave, 15] <= P[c:-24%](127,0,255), P[c:2%] (0,0,255), P[c:27%](0,255,0), P[c:52%](255,255,0), P[c:77%](255,0,0), P[c:102%](127,0,255);
A[rainbow_wave, 16] <= P[c:-22%](127,0,255), P[c:4%] (0,0,255), P[c:29%](0,255,0), P[c:54%](255,255,0), P[c:79%](255,0,0), P[c:104%](127,0,255);
A[rainbow_wave, 17] <= P[c:-20%](127,0,255), P[c:6%] (0,0,255), P[c:31%](0,255,0), P[c:56%](255,255,0), P[c:81%](255,0,0), P[c:106%](127,0,255);
A[rainbow_wave, 18] <= P[c:-18%](127,0,255), P[c:8%] (0,0,255), P[c:33%](0,255,0), P[c:58%](255,255,0), P[c:83%](255,0,0), P[c:108%](127,0,255);
A[rainbow_wave, 19] <= P[c:-16%](127,0,255), P[c:10%](0,0,255), P[c:35%](0,255,0), P[c:60%](255,255,0), P[c:85%](255,0,0), P[c:110%](127,0,255);
A[rainbow_wave, 20] <= P[c:-14%](127,0,255), P[c:12%](0,0,255), P[c:37%](0,255,0), P[c:62%](255,255,0), P[c:87%](255,0,0), P[c:112%](127,0,255);
A[rainbow_wave, 21] <= P[c:-12%](127,0,255), P[c:14%](0,0,255), P[c:39%](0,255,0), P[c:64%](255,255,0), P[c:89%](255,0,0), P[c:114%](127,0,255);
A[rainbow_wave, 22] <= P[c:-10%](127,0,255), P[c:16%](0,0,255), P[c:41%](0,255,0), P[c:66%](255,255,0), P[c:91%](255,0,0), P[c:116%](127,0,255);
A[rainbow_wave, 23] <= P[c:-8%] (127,0,255), P[c:18%](0,0,255), P[c:43%](0,255,0), P[c:68%](255,255,0), P[c:93%](255,0,0), P[c:118%](127,0,255);
A[rainbow_wave, 24] <= P[c:-6%] (127,0,255), P[c:20%](0,0,255), P[c:45%](0,255,0), P[c:70%](255,255,0), P[c:95%](255,0,0), P[c:120%](127,0,255);
A[rainbow_wave, 25] <= P[c:-4%] (127,0,255), P[c:22%](0,0,255), P[c:47%](0,255,0), P[c:72%](255,255,0), P[c:97%](255,0,0), P[c:122%](127,0,255);
A[rainbow_wave, 26] <= P[c:-2%] (127,0,255), P[c:24%](0,0,255), P[c:49%](0,255,0), P[c:74%](255,255,0), P[c:99%](255,0,0), P[c:124%](127,0,255);
A[rainbow_wave, 28] <= P[c:0%] (127,0,255), P[c:25%](0,0,255), P[c:50%](0,255,0), P[c:75%](255,255,0), P[c:100%](255,0,0);
A[rainbow_wave, 29] <= P[c:-24%](255,0,0), P[c:2%] (127,0,255), P[c:27%](0,0,255), P[c:52%](0,255,0), P[c:77%](255,255,0), P[c:102%](255,0,0);
A[rainbow_wave, 30] <= P[c:-22%](255,0,0), P[c:4%] (127,0,255), P[c:29%](0,0,255), P[c:54%](0,255,0), P[c:79%](255,255,0), P[c:104%](255,0,0);
A[rainbow_wave, 31] <= P[c:-20%](255,0,0), P[c:6%] (127,0,255), P[c:31%](0,0,255), P[c:56%](0,255,0), P[c:81%](255,255,0), P[c:106%](255,0,0);
A[rainbow_wave, 32] <= P[c:-18%](255,0,0), P[c:8%] (127,0,255), P[c:33%](0,0,255), P[c:58%](0,255,0), P[c:83%](255,255,0), P[c:108%](255,0,0);
A[rainbow_wave, 33] <= P[c:-16%](255,0,0), P[c:10%](127,0,255), P[c:35%](0,0,255), P[c:60%](0,255,0), P[c:85%](255,255,0), P[c:110%](255,0,0);
A[rainbow_wave, 34] <= P[c:-14%](255,0,0), P[c:12%](127,0,255), P[c:37%](0,0,255), P[c:62%](0,255,0), P[c:87%](255,255,0), P[c:112%](255,0,0);
A[rainbow_wave, 35] <= P[c:-12%](255,0,0), P[c:14%](127,0,255), P[c:39%](0,0,255), P[c:64%](0,255,0), P[c:89%](255,255,0), P[c:114%](255,0,0);
A[rainbow_wave, 36] <= P[c:-10%](255,0,0), P[c:16%](127,0,255), P[c:41%](0,0,255), P[c:66%](0,255,0), P[c:91%](255,255,0), P[c:116%](255,0,0);
A[rainbow_wave, 37] <= P[c:-8%] (255,0,0), P[c:18%](127,0,255), P[c:43%](0,0,255), P[c:68%](0,255,0), P[c:93%](255,255,0), P[c:118%](255,0,0);
A[rainbow_wave, 38] <= P[c:-6%] (255,0,0), P[c:20%](127,0,255), P[c:45%](0,0,255), P[c:70%](0,255,0), P[c:95%](255,255,0), P[c:120%](255,0,0);
A[rainbow_wave, 39] <= P[c:-4%] (255,0,0), P[c:22%](127,0,255), P[c:47%](0,0,255), P[c:72%](0,255,0), P[c:97%](255,255,0), P[c:122%](255,0,0);
A[rainbow_wave, 40] <= P[c:-2%] (255,0,0), P[c:24%](127,0,255), P[c:49%](0,0,255), P[c:74%](0,255,0), P[c:99%](255,255,0), P[c:124%](255,0,0);
A[rainbow_wave, 41] <= P[c:0%] (255,0,0), P[c:25%](127,0,255), P[c:50%](0,0,255), P[c:75%](0,255,0), P[c:100%](255,255,0);
A[rainbow_wave, 42] <= P[c:-24%](255,255,0), P[c:2%] (255,0,0), P[c:27%](127,0,255), P[c:52%](0,0,255), P[c:77%](0,255,0), P[c:102%](255,255,0);
A[rainbow_wave, 43] <= P[c:-22%](255,255,0), P[c:4%] (255,0,0), P[c:29%](127,0,255), P[c:54%](0,0,255), P[c:79%](0,255,0), P[c:104%](255,255,0);
A[rainbow_wave, 44] <= P[c:-20%](255,255,0), P[c:6%] (255,0,0), P[c:31%](127,0,255), P[c:56%](0,0,255), P[c:81%](0,255,0), P[c:106%](255,255,0);
A[rainbow_wave, 45] <= P[c:-18%](255,255,0), P[c:8%] (255,0,0), P[c:33%](127,0,255), P[c:58%](0,0,255), P[c:83%](0,255,0), P[c:108%](255,255,0);
A[rainbow_wave, 46] <= P[c:-16%](255,255,0), P[c:10%](255,0,0), P[c:35%](127,0,255), P[c:60%](0,0,255), P[c:85%](0,255,0), P[c:110%](255,255,0);
A[rainbow_wave, 47] <= P[c:-14%](255,255,0), P[c:12%](255,0,0), P[c:37%](127,0,255), P[c:62%](0,0,255), P[c:87%](0,255,0), P[c:112%](255,255,0);
A[rainbow_wave, 48] <= P[c:-12%](255,255,0), P[c:14%](255,0,0), P[c:39%](127,0,255), P[c:64%](0,0,255), P[c:89%](0,255,0), P[c:114%](255,255,0);
A[rainbow_wave, 49] <= P[c:-10%](255,255,0), P[c:16%](255,0,0), P[c:41%](127,0,255), P[c:66%](0,0,255), P[c:91%](0,255,0), P[c:116%](255,255,0);
A[rainbow_wave, 50] <= P[c:-8%] (255,255,0), P[c:18%](255,0,0), P[c:43%](127,0,255), P[c:68%](0,0,255), P[c:93%](0,255,0), P[c:118%](255,255,0);
A[rainbow_wave, 51] <= P[c:-6%] (255,255,0), P[c:20%](255,0,0), P[c:45%](127,0,255), P[c:70%](0,0,255), P[c:95%](0,255,0), P[c:120%](255,255,0);
A[rainbow_wave, 52] <= P[c:-4%] (255,255,0), P[c:22%](255,0,0), P[c:47%](127,0,255), P[c:72%](0,0,255), P[c:97%](0,255,0), P[c:122%](255,255,0);
A[rainbow_wave, 53] <= P[c:-2%] (255,255,0), P[c:24%](255,0,0), P[c:49%](127,0,255), P[c:74%](0,0,255), P[c:99%](0,255,0), P[c:124%](255,255,0);
A[rainbow_wave, 54] <= P[c:0%] (255,255,0), P[c:25%](255,0,0), P[c:50%](127,0,255), P[c:75%](0,0,255), P[c:100%](0,255,0);
A[rainbow_wave, 55] <= P[c:-24%](0,255,0), P[c:2%] (255,255,0), P[c:27%](255,0,0), P[c:52%](127,0,255), P[c:77%](0,0,255), P[c:102%](0,255,0);
A[rainbow_wave, 56] <= P[c:-22%](0,255,0), P[c:4%] (255,255,0), P[c:29%](255,0,0), P[c:54%](127,0,255), P[c:79%](0,0,255), P[c:104%](0,255,0);
A[rainbow_wave, 57] <= P[c:-20%](0,255,0), P[c:6%] (255,255,0), P[c:31%](255,0,0), P[c:56%](127,0,255), P[c:81%](0,0,255), P[c:106%](0,255,0);
A[rainbow_wave, 58] <= P[c:-18%](0,255,0), P[c:8%] (255,255,0), P[c:33%](255,0,0), P[c:58%](127,0,255), P[c:83%](0,0,255), P[c:108%](0,255,0);
A[rainbow_wave, 59] <= P[c:-16%](0,255,0), P[c:10%](255,255,0), P[c:35%](255,0,0), P[c:60%](127,0,255), P[c:85%](0,0,255), P[c:110%](0,255,0);
A[rainbow_wave, 60] <= P[c:-14%](0,255,0), P[c:12%](255,255,0), P[c:37%](255,0,0), P[c:62%](127,0,255), P[c:87%](0,0,255), P[c:112%](0,255,0);
A[rainbow_wave, 61] <= P[c:-12%](0,255,0), P[c:14%](255,255,0), P[c:39%](255,0,0), P[c:64%](127,0,255), P[c:89%](0,0,255), P[c:114%](0,255,0);
A[rainbow_wave, 62] <= P[c:-10%](0,255,0), P[c:16%](255,255,0), P[c:41%](255,0,0), P[c:66%](127,0,255), P[c:91%](0,0,255), P[c:116%](0,255,0);
A[rainbow_wave, 63] <= P[c:-8%] (0,255,0), P[c:18%](255,255,0), P[c:43%](255,0,0), P[c:68%](127,0,255), P[c:93%](0,0,255), P[c:118%](0,255,0);
A[rainbow_wave, 64] <= P[c:-6%] (0,255,0), P[c:20%](255,255,0), P[c:45%](255,0,0), P[c:70%](127,0,255), P[c:95%](0,0,255), P[c:120%](0,255,0);
A[rainbow_wave, 65] <= P[c:-4%] (0,255,0), P[c:22%](255,255,0), P[c:47%](255,0,0), P[c:72%](127,0,255), P[c:97%](0,0,255), P[c:122%](0,255,0);
A[rainbow_wave, 66] <= P[c:-2%] (0,255,0), P[c:24%](255,255,0), P[c:49%](255,0,0), P[c:74%](127,0,255), P[c:99%](0,0,255), P[c:124%](0,255,0);
# Pastel Rainbow
A[pastel_wave] <= framedelay:3, framestretch, loop, replace:clear, pfunc:interp;
A[pastel_wave, 1] <= P[c:0%] (40,255,40), P[c:25%](255,255,40), P[c:50%](255,40,40), P[c:75%](127,40,255), P[c:100%](40,40,255);
A[pastel_wave, 2] <= P[c:-24%](40,40,255), P[c:2%] (40,255,40), P[c:27%](255,255,40), P[c:52%](255,40,40), P[c:77%](127,40,255), P[c:102%](40,40,255);
A[pastel_wave, 3] <= P[c:-22%](40,40,255), P[c:4%] (40,255,40), P[c:29%](255,255,40), P[c:54%](255,40,40), P[c:79%](127,40,255), P[c:104%](40,40,255);
A[pastel_wave, 4] <= P[c:-20%](40,40,255), P[c:6%] (40,255,40), P[c:31%](255,255,40), P[c:56%](255,40,40), P[c:81%](127,40,255), P[c:106%](40,40,255);
A[pastel_wave, 5] <= P[c:-18%](40,40,255), P[c:8%] (40,255,40), P[c:33%](255,255,40), P[c:58%](255,40,40), P[c:83%](127,40,255), P[c:108%](40,40,255);
A[pastel_wave, 6] <= P[c:-16%](40,40,255), P[c:10%](40,255,40), P[c:35%](255,255,40), P[c:60%](255,40,40), P[c:85%](127,40,255), P[c:110%](40,40,255);
A[pastel_wave, 7] <= P[c:-14%](40,40,255), P[c:12%](40,255,40), P[c:37%](255,255,40), P[c:62%](255,40,40), P[c:87%](127,40,255), P[c:112%](40,40,255);
A[pastel_wave, 8] <= P[c:-12%](40,40,255), P[c:14%](40,255,40), P[c:39%](255,255,40), P[c:64%](255,40,40), P[c:89%](127,40,255), P[c:114%](40,40,255);
A[pastel_wave, 9] <= P[c:-10%](40,40,255), P[c:16%](40,255,40), P[c:41%](255,255,40), P[c:66%](255,40,40), P[c:91%](127,40,255), P[c:116%](40,40,255);
A[pastel_wave, 10] <= P[c:-8%] (40,40,255), P[c:18%](40,255,40), P[c:43%](255,255,40), P[c:68%](255,40,40), P[c:93%](127,40,255), P[c:118%](40,40,255);
A[pastel_wave, 11] <= P[c:-6%] (40,40,255), P[c:20%](40,255,40), P[c:45%](255,255,40), P[c:70%](255,40,40), P[c:95%](127,40,255), P[c:120%](40,40,255);
A[pastel_wave, 12] <= P[c:-4%] (40,40,255), P[c:22%](40,255,40), P[c:47%](255,255,40), P[c:72%](255,40,40), P[c:97%](127,40,255), P[c:122%](40,40,255);
A[pastel_wave, 13] <= P[c:-2%] (40,40,255), P[c:24%](40,255,40), P[c:49%](255,255,40), P[c:74%](255,40,40), P[c:99%](127,40,255), P[c:124%](40,40,255);
A[pastel_wave, 14] <= P[c:0%] (40,40,255), P[c:25%](40,255,40), P[c:50%](255,255,40), P[c:75%](255,40,40), P[c:100%](127,40,255);
A[pastel_wave, 15] <= P[c:-24%](127,40,255), P[c:2%] (40,40,255), P[c:27%](40,255,40), P[c:52%](255,255,40), P[c:77%](255,40,40), P[c:102%](127,40,255);
A[pastel_wave, 16] <= P[c:-22%](127,40,255), P[c:4%] (40,40,255), P[c:29%](40,255,40), P[c:54%](255,255,40), P[c:79%](255,40,40), P[c:104%](127,40,255);
A[pastel_wave, 17] <= P[c:-20%](127,40,255), P[c:6%] (40,40,255), P[c:31%](40,255,40), P[c:56%](255,255,40), P[c:81%](255,40,40), P[c:106%](127,40,255);
A[pastel_wave, 18] <= P[c:-18%](127,40,255), P[c:8%] (40,40,255), P[c:33%](40,255,40), P[c:58%](255,255,40), P[c:83%](255,40,40), P[c:108%](127,40,255);
A[pastel_wave, 19] <= P[c:-16%](127,40,255), P[c:10%](40,40,255), P[c:35%](40,255,40), P[c:60%](255,255,40), P[c:85%](255,40,40), P[c:110%](127,40,255);
A[pastel_wave, 20] <= P[c:-14%](127,40,255), P[c:12%](40,40,255), P[c:37%](40,255,40), P[c:62%](255,255,40), P[c:87%](255,40,40), P[c:112%](127,40,255);
A[pastel_wave, 21] <= P[c:-12%](127,40,255), P[c:14%](40,40,255), P[c:39%](40,255,40), P[c:64%](255,255,40), P[c:89%](255,40,40), P[c:114%](127,40,255);
A[pastel_wave, 22] <= P[c:-10%](127,40,255), P[c:16%](40,40,255), P[c:41%](40,255,40), P[c:66%](255,255,40), P[c:91%](255,40,40), P[c:116%](127,40,255);
A[pastel_wave, 23] <= P[c:-8%] (127,40,255), P[c:18%](40,40,255), P[c:43%](40,255,40), P[c:68%](255,255,40), P[c:93%](255,40,40), P[c:118%](127,40,255);
A[pastel_wave, 24] <= P[c:-6%] (127,40,255), P[c:20%](40,40,255), P[c:45%](40,255,40), P[c:70%](255,255,40), P[c:95%](255,40,40), P[c:120%](127,40,255);
A[pastel_wave, 25] <= P[c:-4%] (127,40,255), P[c:22%](40,40,255), P[c:47%](40,255,40), P[c:72%](255,255,40), P[c:97%](255,40,40), P[c:122%](127,40,255);
A[pastel_wave, 26] <= P[c:-2%] (127,40,255), P[c:24%](40,40,255), P[c:49%](40,255,40), P[c:74%](255,255,40), P[c:99%](255,40,40), P[c:124%](127,40,255);
A[pastel_wave, 28] <= P[c:0%] (127,40,255), P[c:25%](40,40,255), P[c:50%](40,255,40), P[c:75%](255,255,40), P[c:100%](255,40,40);
A[pastel_wave, 29] <= P[c:-24%](255,40,40), P[c:2%] (127,40,255), P[c:27%](40,40,255), P[c:52%](40,255,40), P[c:77%](255,255,40), P[c:102%](255,40,40);
A[pastel_wave, 30] <= P[c:-22%](255,40,40), P[c:4%] (127,40,255), P[c:29%](40,40,255), P[c:54%](40,255,40), P[c:79%](255,255,40), P[c:104%](255,40,40);
A[pastel_wave, 31] <= P[c:-20%](255,40,40), P[c:6%] (127,40,255), P[c:31%](40,40,255), P[c:56%](40,255,40), P[c:81%](255,255,40), P[c:106%](255,40,40);
A[pastel_wave, 32] <= P[c:-18%](255,40,40), P[c:8%] (127,40,255), P[c:33%](40,40,255), P[c:58%](40,255,40), P[c:83%](255,255,40), P[c:108%](255,40,40);
A[pastel_wave, 33] <= P[c:-16%](255,40,40), P[c:10%](127,40,255), P[c:35%](40,40,255), P[c:60%](40,255,40), P[c:85%](255,255,40), P[c:110%](255,40,40);
A[pastel_wave, 34] <= P[c:-14%](255,40,40), P[c:12%](127,40,255), P[c:37%](40,40,255), P[c:62%](40,255,40), P[c:87%](255,255,40), P[c:112%](255,40,40);
A[pastel_wave, 35] <= P[c:-12%](255,40,40), P[c:14%](127,40,255), P[c:39%](40,40,255), P[c:64%](40,255,40), P[c:89%](255,255,40), P[c:114%](255,40,40);
A[pastel_wave, 36] <= P[c:-10%](255,40,40), P[c:16%](127,40,255), P[c:41%](40,40,255), P[c:66%](40,255,40), P[c:91%](255,255,40), P[c:116%](255,40,40);
A[pastel_wave, 37] <= P[c:-8%] (255,40,40), P[c:18%](127,40,255), P[c:43%](40,40,255), P[c:68%](40,255,40), P[c:93%](255,255,40), P[c:118%](255,40,40);
A[pastel_wave, 38] <= P[c:-6%] (255,40,40), P[c:20%](127,40,255), P[c:45%](40,40,255), P[c:70%](40,255,40), P[c:95%](255,255,40), P[c:120%](255,40,40);
A[pastel_wave, 39] <= P[c:-4%] (255,40,40), P[c:22%](127,40,255), P[c:47%](40,40,255), P[c:72%](40,255,40), P[c:97%](255,255,40), P[c:122%](255,40,40);
A[pastel_wave, 40] <= P[c:-2%] (255,40,40), P[c:24%](127,40,255), P[c:49%](40,40,255), P[c:74%](40,255,40), P[c:99%](255,255,40), P[c:124%](255,40,40);
A[pastel_wave, 41] <= P[c:0%] (255,40,40), P[c:25%](127,40,255), P[c:50%](40,40,255), P[c:75%](40,255,40), P[c:100%](255,255,40);
A[pastel_wave, 42] <= P[c:-24%](255,255,40), P[c:2%] (255,40,40), P[c:27%](127,40,255), P[c:52%](40,40,255), P[c:77%](40,255,40), P[c:102%](255,255,40);
A[pastel_wave, 43] <= P[c:-22%](255,255,40), P[c:4%] (255,40,40), P[c:29%](127,40,255), P[c:54%](40,40,255), P[c:79%](40,255,40), P[c:104%](255,255,40);
A[pastel_wave, 44] <= P[c:-20%](255,255,40), P[c:6%] (255,40,40), P[c:31%](127,40,255), P[c:56%](40,40,255), P[c:81%](40,255,40), P[c:106%](255,255,40);
A[pastel_wave, 45] <= P[c:-18%](255,255,40), P[c:8%] (255,40,40), P[c:33%](127,40,255), P[c:58%](40,40,255), P[c:83%](40,255,40), P[c:108%](255,255,40);
A[pastel_wave, 46] <= P[c:-16%](255,255,40), P[c:10%](255,40,40), P[c:35%](127,40,255), P[c:60%](40,40,255), P[c:85%](40,255,40), P[c:110%](255,255,40);
A[pastel_wave, 47] <= P[c:-14%](255,255,40), P[c:12%](255,40,40), P[c:37%](127,40,255), P[c:62%](40,40,255), P[c:87%](40,255,40), P[c:112%](255,255,40);
A[pastel_wave, 48] <= P[c:-12%](255,255,40), P[c:14%](255,40,40), P[c:39%](127,40,255), P[c:64%](40,40,255), P[c:89%](40,255,40), P[c:114%](255,255,40);
A[pastel_wave, 49] <= P[c:-10%](255,255,40), P[c:16%](255,40,40), P[c:41%](127,40,255), P[c:66%](40,40,255), P[c:91%](40,255,40), P[c:116%](255,255,40);
A[pastel_wave, 50] <= P[c:-8%] (255,255,40), P[c:18%](255,40,40), P[c:43%](127,40,255), P[c:68%](40,40,255), P[c:93%](40,255,40), P[c:118%](255,255,40);
A[pastel_wave, 51] <= P[c:-6%] (255,255,40), P[c:20%](255,40,40), P[c:45%](127,40,255), P[c:70%](40,40,255), P[c:95%](40,255,40), P[c:120%](255,255,40);
A[pastel_wave, 52] <= P[c:-4%] (255,255,40), P[c:22%](255,40,40), P[c:47%](127,40,255), P[c:72%](40,40,255), P[c:97%](40,255,40), P[c:122%](255,255,40);
A[pastel_wave, 53] <= P[c:-2%] (255,255,40), P[c:24%](255,40,40), P[c:49%](127,40,255), P[c:74%](40,40,255), P[c:99%](40,255,40), P[c:124%](255,255,40);
A[pastel_wave, 54] <= P[c:0%] (255,255,40), P[c:25%](255,40,40), P[c:50%](127,40,255), P[c:75%](40,40,255), P[c:100%](40,255,40);
A[pastel_wave, 55] <= P[c:-24%](40,255,40), P[c:2%] (255,255,40), P[c:27%](255,40,40), P[c:52%](127,40,255), P[c:77%](40,40,255), P[c:102%](40,255,40);
A[pastel_wave, 56] <= P[c:-22%](40,255,40), P[c:4%] (255,255,40), P[c:29%](255,40,40), P[c:54%](127,40,255), P[c:79%](40,40,255), P[c:104%](40,255,40);
A[pastel_wave, 57] <= P[c:-20%](40,255,40), P[c:6%] (255,255,40), P[c:31%](255,40,40), P[c:56%](127,40,255), P[c:81%](40,40,255), P[c:106%](40,255,40);
A[pastel_wave, 58] <= P[c:-18%](40,255,40), P[c:8%] (255,255,40), P[c:33%](255,40,40), P[c:58%](127,40,255), P[c:83%](40,40,255), P[c:108%](40,255,40);
A[pastel_wave, 59] <= P[c:-16%](40,255,40), P[c:10%](255,255,40), P[c:35%](255,40,40), P[c:60%](127,40,255), P[c:85%](40,40,255), P[c:110%](40,255,40);
A[pastel_wave, 60] <= P[c:-14%](40,255,40), P[c:12%](255,255,40), P[c:37%](255,40,40), P[c:62%](127,40,255), P[c:87%](40,40,255), P[c:112%](40,255,40);
A[pastel_wave, 61] <= P[c:-12%](40,255,40), P[c:14%](255,255,40), P[c:39%](255,40,40), P[c:64%](127,40,255), P[c:89%](40,40,255), P[c:114%](40,255,40);
A[pastel_wave, 62] <= P[c:-10%](40,255,40), P[c:16%](255,255,40), P[c:41%](255,40,40), P[c:66%](127,40,255), P[c:91%](40,40,255), P[c:116%](40,255,40);
A[pastel_wave, 63] <= P[c:-8%] (40,255,40), P[c:18%](255,255,40), P[c:43%](255,40,40), P[c:68%](127,40,255), P[c:93%](40,40,255), P[c:118%](40,255,40);
A[pastel_wave, 64] <= P[c:-6%] (40,255,40), P[c:20%](255,255,40), P[c:45%](255,40,40), P[c:70%](127,40,255), P[c:95%](40,40,255), P[c:120%](40,255,40);
A[pastel_wave, 65] <= P[c:-4%] (40,255,40), P[c:22%](255,255,40), P[c:47%](255,40,40), P[c:72%](127,40,255), P[c:97%](40,40,255), P[c:122%](40,255,40);
A[pastel_wave, 66] <= P[c:-2%] (40,255,40), P[c:24%](255,255,40), P[c:49%](255,40,40), P[c:74%](127,40,255), P[c:99%](40,40,255), P[c:124%](40,255,40);
# Kira Wave
A[kira_wave] <= framedelay:3, framestretch, loop, replace:clear, pfunc:interp;
# Orange, Magenta, Purple, Blue
A[kira_wave, 1] <= P[c:0%] (255,80,0), P[c:33%](239,10,129), P[c:66%](129,10,239), P[c:100%](10,60,255);
A[kira_wave, 2] <= P[c:-31%](10,60,255), P[c:2%] (255,80,0), P[c:36%](239,10,129), P[c:68%](129,10,239), P[c:102%](10,60,255);
A[kira_wave, 3] <= P[c:-29%](10,60,255), P[c:4%] (255,80,0), P[c:38%](239,10,129), P[c:70%](129,10,239), P[c:104%](10,60,255);
A[kira_wave, 4] <= P[c:-27%](10,60,255), P[c:6%] (255,80,0), P[c:40%](239,10,129), P[c:72%](129,10,239), P[c:106%](10,60,255);
A[kira_wave, 5] <= P[c:-25%](10,60,255), P[c:8%] (255,80,0), P[c:42%](239,10,129), P[c:74%](129,10,239), P[c:108%](10,60,255);
A[kira_wave, 6] <= P[c:-23%](10,60,255), P[c:10%](255,80,0), P[c:44%](239,10,129), P[c:76%](129,10,239), P[c:110%](10,60,255);
A[kira_wave, 7] <= P[c:-21%](10,60,255), P[c:12%](255,80,0), P[c:46%](239,10,129), P[c:78%](129,10,239), P[c:112%](10,60,255);
A[kira_wave, 8] <= P[c:-19%](10,60,255), P[c:14%](255,80,0), P[c:48%](239,10,129), P[c:80%](129,10,239), P[c:114%](10,60,255);
A[kira_wave, 9] <= P[c:-17%](10,60,255), P[c:16%](255,80,0), P[c:50%](239,10,129), P[c:82%](129,10,239), P[c:116%](10,60,255);
A[kira_wave, 10] <= P[c:-15%](10,60,255), P[c:18%](255,80,0), P[c:52%](239,10,129), P[c:84%](129,10,239), P[c:118%](10,60,255);
A[kira_wave, 11] <= P[c:-13%](10,60,255), P[c:20%](255,80,0), P[c:54%](239,10,129), P[c:86%](129,10,239), P[c:120%](10,60,255);
A[kira_wave, 12] <= P[c:-11%](10,60,255), P[c:22%](255,80,0), P[c:56%](239,10,129), P[c:88%](129,10,239), P[c:122%](10,60,255);
A[kira_wave, 13] <= P[c:-9%] (10,60,255), P[c:24%](255,80,0), P[c:58%](239,10,129), P[c:90%](129,10,239), P[c:124%](10,60,255);
A[kira_wave, 14] <= P[c:-7%] (10,60,255), P[c:26%](255,80,0), P[c:60%](239,10,129), P[c:92%](129,10,239), P[c:126%](10,60,255);
A[kira_wave, 15] <= P[c:-5%] (10,60,255), P[c:28%](255,80,0), P[c:62%](239,10,129), P[c:94%](129,10,239), P[c:128%](10,60,255);
A[kira_wave, 16] <= P[c:-3%] (10,60,255), P[c:30%](255,80,0), P[c:64%](239,10,129), P[c:96%](129,10,239), P[c:130%](10,60,255);
A[kira_wave, 17] <= P[c:0%] (10,60,255), P[c:33%](255,80,0), P[c:66%](239,10,129), P[c:100%](129,10,239);
A[kira_wave, 18] <= P[c:-31%](129,10,239), P[c:2%] (10,60,255), P[c:36%](255,80,0), P[c:68%](239,10,129), P[c:102%](129,10,239);
A[kira_wave, 19] <= P[c:-29%](129,10,239), P[c:4%] (10,60,255), P[c:38%](255,80,0), P[c:70%](239,10,129), P[c:104%](129,10,239);
A[kira_wave, 20] <= P[c:-27%](129,10,239), P[c:6%] (10,60,255), P[c:40%](255,80,0), P[c:72%](239,10,129), P[c:106%](129,10,239);
A[kira_wave, 21] <= P[c:-25%](129,10,239), P[c:8%] (10,60,255), P[c:42%](255,80,0), P[c:74%](239,10,129), P[c:108%](129,10,239);
A[kira_wave, 22] <= P[c:-23%](129,10,239), P[c:10%](10,60,255), P[c:44%](255,80,0), P[c:76%](239,10,129), P[c:110%](129,10,239);
A[kira_wave, 23] <= P[c:-21%](129,10,239), P[c:12%](10,60,255), P[c:46%](255,80,0), P[c:78%](239,10,129), P[c:112%](129,10,239);
A[kira_wave, 24] <= P[c:-19%](129,10,239), P[c:14%](10,60,255), P[c:48%](255,80,0), P[c:80%](239,10,129), P[c:114%](129,10,239);
A[kira_wave, 25] <= P[c:-17%](129,10,239), P[c:16%](10,60,255), P[c:50%](255,80,0), P[c:82%](239,10,129), P[c:116%](129,10,239);
A[kira_wave, 26] <= P[c:-15%](129,10,239), P[c:18%](10,60,255), P[c:52%](255,80,0), P[c:84%](239,10,129), P[c:118%](129,10,239);
A[kira_wave, 27] <= P[c:-13%](129,10,239), P[c:20%](10,60,255), P[c:54%](255,80,0), P[c:86%](239,10,129), P[c:120%](129,10,239);
A[kira_wave, 28] <= P[c:-11%](129,10,239), P[c:22%](10,60,255), P[c:56%](255,80,0), P[c:88%](239,10,129), P[c:122%](129,10,239);
A[kira_wave, 29] <= P[c:-9%] (129,10,239), P[c:24%](10,60,255), P[c:58%](255,80,0), P[c:90%](239,10,129), P[c:124%](129,10,239);
A[kira_wave, 30] <= P[c:-7%] (129,10,239), P[c:26%](10,60,255), P[c:60%](255,80,0), P[c:92%](239,10,129), P[c:126%](129,10,239);
A[kira_wave, 31] <= P[c:-5%] (129,10,239), P[c:28%](10,60,255), P[c:62%](255,80,0), P[c:94%](239,10,129), P[c:128%](129,10,239);
A[kira_wave, 32] <= P[c:-3%] (129,10,239), P[c:30%](10,60,255), P[c:64%](255,80,0), P[c:96%](239,10,129), P[c:130%](129,10,239);
A[kira_wave, 33] <= P[c:0%] (129,10,239), P[c:33%](10,60,255), P[c:66%](255,80,0), P[c:100%](239,10,129);
A[kira_wave, 34] <= P[c:-31%](239,10,129), P[c:2%] (129,10,239), P[c:36%](10,60,255), P[c:68%](255,80,0), P[c:102%](239,10,129);
A[kira_wave, 35] <= P[c:-29%](239,10,129), P[c:4%] (129,10,239), P[c:38%](10,60,255), P[c:70%](255,80,0), P[c:104%](239,10,129);
A[kira_wave, 36] <= P[c:-27%](239,10,129), P[c:6%] (129,10,239), P[c:40%](10,60,255), P[c:72%](255,80,0), P[c:106%](239,10,129);
A[kira_wave, 37] <= P[c:-25%](239,10,129), P[c:8%] (129,10,239), P[c:42%](10,60,255), P[c:74%](255,80,0), P[c:108%](239,10,129);
A[kira_wave, 38] <= P[c:-23%](239,10,129), P[c:10%](129,10,239), P[c:44%](10,60,255), P[c:76%](255,80,0), P[c:110%](239,10,129);
A[kira_wave, 39] <= P[c:-21%](239,10,129), P[c:12%](129,10,239), P[c:46%](10,60,255), P[c:78%](255,80,0), P[c:112%](239,10,129);
A[kira_wave, 40] <= P[c:-19%](239,10,129), P[c:14%](129,10,239), P[c:48%](10,60,255), P[c:80%](255,80,0), P[c:114%](239,10,129);
A[kira_wave, 41] <= P[c:-17%](239,10,129), P[c:16%](129,10,239), P[c:50%](10,60,255), P[c:82%](255,80,0), P[c:116%](239,10,129);
A[kira_wave, 42] <= P[c:-15%](239,10,129), P[c:18%](129,10,239), P[c:52%](10,60,255), P[c:84%](255,80,0), P[c:118%](239,10,129);
A[kira_wave, 43] <= P[c:-13%](239,10,129), P[c:20%](129,10,239), P[c:54%](10,60,255), P[c:86%](255,80,0), P[c:120%](239,10,129);
A[kira_wave, 44] <= P[c:-11%](239,10,129), P[c:22%](129,10,239), P[c:56%](10,60,255), P[c:88%](255,80,0), P[c:122%](239,10,129);
A[kira_wave, 45] <= P[c:-9%] (239,10,129), P[c:24%](129,10,239), P[c:58%](10,60,255), P[c:90%](255,80,0), P[c:124%](239,10,129);
A[kira_wave, 46] <= P[c:-7%] (239,10,129), P[c:26%](129,10,239), P[c:60%](10,60,255), P[c:92%](255,80,0), P[c:126%](239,10,129);
A[kira_wave, 47] <= P[c:-5%] (239,10,129), P[c:28%](129,10,239), P[c:62%](10,60,255), P[c:94%](255,80,0), P[c:128%](239,10,129);
A[kira_wave, 48] <= P[c:-3%] (239,10,129), P[c:30%](129,10,239), P[c:64%](10,60,255), P[c:96%](255,80,0), P[c:130%](239,10,129);
A[kira_wave, 49] <= P[c:0%] (239,10,129), P[c:33%](129,10,239), P[c:66%](10,60,255), P[c:100%](255,80,0);
A[kira_wave, 50] <= P[c:-31%](255,80,0), P[c:2%] (239,10,129), P[c:36%](129,10,239), P[c:68%](10,60,255), P[c:102%](255,80,0);
A[kira_wave, 51] <= P[c:-29%](255,80,0), P[c:4%] (239,10,129), P[c:38%](129,10,239), P[c:70%](10,60,255), P[c:104%](255,80,0);
A[kira_wave, 52] <= P[c:-27%](255,80,0), P[c:6%] (239,10,129), P[c:40%](129,10,239), P[c:72%](10,60,255), P[c:106%](255,80,0);
A[kira_wave, 53] <= P[c:-25%](255,80,0), P[c:8%] (239,10,129), P[c:42%](129,10,239), P[c:74%](10,60,255), P[c:108%](255,80,0);
A[kira_wave, 54] <= P[c:-23%](255,80,0), P[c:10%](239,10,129), P[c:44%](129,10,239), P[c:76%](10,60,255), P[c:110%](255,80,0);
A[kira_wave, 55] <= P[c:-21%](255,80,0), P[c:12%](239,10,129), P[c:46%](129,10,239), P[c:78%](10,60,255), P[c:112%](255,80,0);
A[kira_wave, 56] <= P[c:-19%](255,80,0), P[c:14%](239,10,129), P[c:48%](129,10,239), P[c:80%](10,60,255), P[c:114%](255,80,0);
A[kira_wave, 57] <= P[c:-17%](255,80,0), P[c:16%](239,10,129), P[c:50%](129,10,239), P[c:82%](10,60,255), P[c:116%](255,80,0);
A[kira_wave, 58] <= P[c:-15%](255,80,0), P[c:18%](239,10,129), P[c:52%](129,10,239), P[c:84%](10,60,255), P[c:118%](255,80,0);
A[kira_wave, 59] <= P[c:-13%](255,80,0), P[c:20%](239,10,129), P[c:54%](129,10,239), P[c:86%](10,60,255), P[c:120%](255,80,0);
A[kira_wave, 61] <= P[c:-11%](255,80,0), P[c:22%](239,10,129), P[c:56%](129,10,239), P[c:88%](10,60,255), P[c:122%](255,80,0);
A[kira_wave, 62] <= P[c:-9%] (255,80,0), P[c:24%](239,10,129), P[c:58%](129,10,239), P[c:90%](10,60,255), P[c:124%](255,80,0);
A[kira_wave, 63] <= P[c:-7%] (255,80,0), P[c:26%](239,10,129), P[c:60%](129,10,239), P[c:92%](10,60,255), P[c:126%](255,80,0);
A[kira_wave, 64] <= P[c:-5%] (255,80,0), P[c:28%](239,10,129), P[c:62%](129,10,239), P[c:94%](10,60,255), P[c:128%](255,80,0);
A[kira_wave, 65] <= P[c:-3%] (255,80,0), P[c:30%](239,10,129), P[c:64%](129,10,239), P[c:96%](10,60,255), P[c:130%](255,80,0);
### Color-Select Keys ###
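# Each frame interpolates a single solid colour across the key scancodes (S1 through S114);
# stepping through the frames cycles the per-key colour selection.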
A[keys_only_rotation] <= pfunc:interp, replace:clear;
A[keys_only_rotation, 1] <= S1(0xC1, 0x2D, 0x1D), S114(0xC1, 0x2D, 0x1D);
A[keys_only_rotation, 2] <= S1(0xEE, 0x6D, 0x28), S114(0xEE, 0x6D, 0x28);
A[keys_only_rotation, 3] <= S1(0xE0, 0x9E, 0x3B), S114(0xE0, 0x9E, 0x3B);
A[keys_only_rotation, 4] <= S1(0xE5, 0xC9, 0x43), S114(0xE5, 0xC9, 0x43);
A[keys_only_rotation, 5] <= S1(0x1E, 0xB8, 0x6D), S114(0x1E, 0xB8, 0x6D);
A[keys_only_rotation, 6] <= S1(0x00, 0xB3, 0xA6), S114(0x00, 0xB3, 0xA6);
A[keys_only_rotation, 7] <= S1(0x20, 0x82, 0xC6), S114(0x20, 0x82, 0xC6);
A[keys_only_rotation, 8] <= S1(0x43, 0x54, 0xC1), S114(0x43, 0x54, 0xC1);
A[keys_only_rotation, 9] <= S1(0x71, 0x1C, 0x9E), S114(0x71, 0x1C, 0x9E);
A[keys_only_rotation, 10] <= S1(0xCD, 0x3B, 0x70), S114(0xCD, 0x3B, 0x70);
A[keys_only_rotation, 11] <= S1(0xB8, 0x34, 0x3E), S114(0xB8, 0x34, 0x3E);
# Pastel Rainbow
A[keys_only_rotation, 12] <= S1(40,255,40), S114(40,255,40);
A[keys_only_rotation, 13] <= S1(255,255,40), S114(255,255,40);
A[keys_only_rotation, 14] <= S1(255,40,40), S114(255,40,40);
A[keys_only_rotation, 15] <= S1(127,40,255), S114(127,40,255);
A[keys_only_rotation, 16] <= S1(40,40,255), S114(40,40,255);
# Kira Colors
A[keys_only_rotation, 17] <= S1(255,80,0), S114(255,80,0);
A[keys_only_rotation, 18] <= S1(239,10,129), S114(239,10,129);
A[keys_only_rotation, 19] <= S1(129,10,239), S114(129,10,239);
A[keys_only_rotation, 20] <= S1(10,60,255), S114(10,60,255);
### Color-Select Underglow ###
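# Same idea as keys_only_rotation, but each frame writes one solid colour only to the pixel
# indices listed in P[...] (the underglow/edge LEDs), leaving the key LEDs untouched.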
A[underlighting_only_rotation] <= replace:clear;
A[underlighting_only_rotation, 1] <= P[18,21,24,27,30,115,118,120,122,113,33,108,49,106,65,104,82,85-86,88-90,92,95,98,100](0xC1, 0x2D, 0x1D);
A[underlighting_only_rotation, 2] <= P[18,21,24,27,30,115,118,120,122,113,33,108,49,106,65,104,82,85-86,88-90,92,95,98,100](0xEE, 0x6D, 0x28);
A[underlighting_only_rotation, 3] <= P[18,21,24,27,30,115,118,120,122,113,33,108,49,106,65,104,82,85-86,88-90,92,95,98,100](0xE0, 0x9E, 0x3B);
A[underlighting_only_rotation, 4] <= P[18,21,24,27,30,115,118,120,122,113,33,108,49,106,65,104,82,85-86,88-90,92,95,98,100](0xE5, 0xC9, 0x43);
A[underlighting_only_rotation, 5] <= P[18,21,24,27,30,115,118,120,122,113,33,108,49,106,65,104,82,85-86,88-90,92,95,98,100](0x1E, 0xB8, 0x6D);
A[underlighting_only_rotation, 6] <= P[18,21,24,27,30,115,118,120,122,113,33,108,49,106,65,104,82,85-86,88-90,92,95,98,100](0x00, 0xB3, 0xA6);
A[underlighting_only_rotation, 7] <= P[18,21,24,27,30,115,118,120,122,113,33,108,49,106,65,104,82,85-86,88-90,92,95,98,100](0x20, 0x82, 0xC6);
A[underlighting_only_rotation, 8] <= P[18,21,24,27,30,115,118,120,122,113,33,108,49,106,65,104,82,85-86,88-90,92,95,98,100](0x43, 0x54, 0xC1);
A[underlighting_only_rotation, 9] <= P[18,21,24,27,30,115,118,120,122,113,33,108,49,106,65,104,82,85-86,88-90,92,95,98,100](0x71, 0x1C, 0x9E);
A[underlighting_only_rotation, 10] <= P[18,21,24,27,30,115,118,120,122,113,33,108,49,106,65,104,82,85-86,88-90,92,95,98,100](0xCD, 0x3B, 0x70);
A[underlighting_only_rotation, 11] <= P[18,21,24,27,30,115,118,120,122,113,33,108,49,106,65,104,82,85-86,88-90,92,95,98,100](0xB8, 0x34, 0x3E);
# Pastel Rainbow
A[underlighting_only_rotation, 12] <= P[18,21,24,27,30,115,118,120,122,113,33,108,49,106,65,104,82,85-86,88-90,92,95,98,100](40,255,40);
A[underlighting_only_rotation, 13] <= P[18,21,24,27,30,115,118,120,122,113,33,108,49,106,65,104,82,85-86,88-90,92,95,98,100](255,255,40);
A[underlighting_only_rotation, 14] <= P[18,21,24,27,30,115,118,120,122,113,33,108,49,106,65,104,82,85-86,88-90,92,95,98,100](255,40,40);
A[underlighting_only_rotation, 15] <= P[18,21,24,27,30,115,118,120,122,113,33,108,49,106,65,104,82,85-86,88-90,92,95,98,100](127,40,255);
A[underlighting_only_rotation, 16] <= P[18,21,24,27,30,115,118,120,122,113,33,108,49,106,65,104,82,85-86,88-90,92,95,98,100](40,40,255);
# Kira Colors
A[underlighting_only_rotation, 17] <= P[18,21,24,27,30,115,118,120,122,113,33,108,49,106,65,104,82,85-86,88-90,92,95,98,100](255,80,0);
A[underlighting_only_rotation, 18] <= P[18,21,24,27,30,115,118,120,122,113,33,108,49,106,65,104,82,85-86,88-90,92,95,98,100](239,10,129);
A[underlighting_only_rotation, 19] <= P[18,21,24,27,30,115,118,120,122,113,33,108,49,106,65,104,82,85-86,88-90,92,95,98,100](129,10,239);
A[underlighting_only_rotation, 20] <= P[18,21,24,27,30,115,118,120,122,113,33,108,49,106,65,104,82,85-86,88-90,92,95,98,100](10,60,255);
### Indicator Mode ###
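# Disabled by default; uncomment the animations and triggers below to light the indicator
# pixels (126-128) when NumLock, CapsLock or ScrollLock is turned on or off.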
#A[numlock_on] <= loops:1, replace:all;
#A[numlock_on, 1] <= P[126](155,155,155);
#A[capslock_on] <= loops:1, replace:all;
#A[capslock_on, 1] <= P[127](155,155,155);
#A[scrolllock_on] <= loops:1, replace:all;
#A[scrolllock_on, 1] <= P[128](155,155,155);
#A[numlock_off] <= loops:1, replace:all;
#A[numlock_off, 1] <= P[126](0,0,0);
#A[capslock_off] <= loops:1, replace:all;
#A[capslock_off, 1] <= P[127](0,0,0);
#A[scrolllock_off] <= loops:1, replace:all;
#A[scrolllock_off, 1] <= P[128](0,0,0);
#I"NumLock"(On) : A[numlock_on](start);
#I"CapsLock"(On) : A[capslock_on](start);
#I"ScrollLock"(On) : A[scrolllock_on](start);
#I"NumLock"(D) : A[numlock_off](start);
#I"CapsLock"(D) : A[capslock_off](start);
#I"ScrollLock"(D) : A[scrolllock_off](start);
### Fade Configuration ###
# Fade Period
# .start and .end must be between 0 and 15
# .start == (1 << start) - 1
# .end == (1 << end)
# This means incrementing .end exponentially increases the length of the period (each step doubles it).
# .start = 0 and .end = 0 disables that time period
# A non-zero start prevents the LED from dimming fully to 0 during the Off->On and On->Off periods;
# the subsequent Off period then holds the start value from On->Off as its display value.
# Only 16 fade periods can be used.
KLL_LED_FadePeriod => KLL_LED_FadePeriod_define;
KLL_LED_FadePeriod[0] = "{ .start = 0, .end = 0 }";
KLL_LED_FadePeriod[1] = "{ .start = 0, .end = 4 }";
KLL_LED_FadePeriod[2] = "{ .start = 0, .end = 5 }";
KLL_LED_FadePeriod[3] = "{ .start = 0, .end = 6 }";
KLL_LED_FadePeriod[4] = "{ .start = 0, .end = 7 }";
KLL_LED_FadePeriod[5] = "{ .start = 0, .end = 8 }";
KLL_LED_FadePeriod[6] = "{ .start = 0, .end = 9 }";
KLL_LED_FadePeriod[7] = "{ .start = 0, .end = 10 }";
KLL_LED_FadePeriod[8] = "{ .start = 0, .end = 11 }";
KLL_LED_FadePeriod[9] = "{ .start = 0, .end = 12 }";
KLL_LED_FadePeriod[10] = "{ .start = 5, .end = 6 }";
KLL_LED_FadePeriod[11] = "{ .start = 6, .end = 7 }";
KLL_LED_FadePeriod[12] = "{ .start = 7, .end = 8 }";
KLL_LED_FadePeriod[13] = "{ .start = 8, .end = 9 }";
KLL_LED_FadePeriod[14] = "{ .start = 9, .end = 10 }";
KLL_LED_FadePeriod[15] = "{ .start = 10, .end = 11 }";
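# Worked example (for illustration, using the mapping described above): entry 7
# ({ .start = 0, .end = 10 }) fades across a counter range of (1 << 0) - 1 = 0 up to
# (1 << 10) = 1024, while entry 10 ({ .start = 5, .end = 6 }) only spans
# (1 << 5) - 1 = 31 to (1 << 6) = 64, so it never goes fully dark and completes much sooner.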
# Fade Default Configuration
# There are 4 fade profiles
# 0) Keys
# 1) Underlighting
# 2) Indicators
# 3) Active Layer (uses fade_layer_highlight to refresh)
# Each fade profile has 4 fade period configurations (see above).
# 0) Off to On
# 1) On
# 2) On to Off
# 3) Off
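# For example, reading the values below against the fade period table above: the Keys
# profile uses period 7 for Off->On, period 5 while On, period 7 for On->Off and period 0
# (disabled) while Off, while the Indicators profile disables fading entirely.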
## Keys
KLL_LED_FadeDefaultConfig0 => KLL_LED_FadeDefaultConfig0_define;
KLL_LED_FadeDefaultConfig0[0] = 7;
KLL_LED_FadeDefaultConfig0[1] = 5;
KLL_LED_FadeDefaultConfig0[2] = 7;
KLL_LED_FadeDefaultConfig0[3] = 0;
## Underlighting
KLL_LED_FadeDefaultConfig1 => KLL_LED_FadeDefaultConfig1_define;
KLL_LED_FadeDefaultConfig1[0] = 7;
KLL_LED_FadeDefaultConfig1[1] = 5;
KLL_LED_FadeDefaultConfig1[2] = 7;
KLL_LED_FadeDefaultConfig1[3] = 0;
## Indicators
KLL_LED_FadeDefaultConfig2 => KLL_LED_FadeDefaultConfig2_define;
KLL_LED_FadeDefaultConfig2[0] = 0;
KLL_LED_FadeDefaultConfig2[1] = 0;
KLL_LED_FadeDefaultConfig2[2] = 0;
KLL_LED_FadeDefaultConfig2[3] = 0;
## Active Layer
KLL_LED_FadeDefaultConfig3 => KLL_LED_FadeDefaultConfig3_define;
KLL_LED_FadeDefaultConfig3[0] = 4;
KLL_LED_FadeDefaultConfig3[1] = 4;
KLL_LED_FadeDefaultConfig3[2] = 4;
KLL_LED_FadeDefaultConfig3[3] = 4;